[Xen-devel] [PATCH] mem_access: Use monitor_traps instead of mem_access_send_req



The two functions monitor_traps and mem_access_send_req duplicate
some of the same functionality. However, mem_access_send_req leaves
many of the standard vm_event fields to be filled in by other functions.

Since mem_access events go on the monitor ring, in this patch we
consolidate all paths to use monitor_traps to place events on the ring
and to fill in the common parts of the requests.
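
With this change, sending a mem_access event reduces to filling in only
the access-specific fields and handing the request to monitor_traps,
which takes care of the common vm_event fields (vcpu_id, pause flag,
altp2m index, register state) and of placing the request on the ring.
A minimal sketch of the pattern (the helper name send_mem_access_event
is illustrative only; the real code is the ARM hunk below):

    static int send_mem_access_event(paddr_t gpa, vaddr_t gla,
                                     const struct npfec npfec, bool_t sync)
    {
        vm_event_request_t req = {};

        /* Only the mem_access-specific fields are filled in here. */
        req.reason = VM_EVENT_REASON_MEM_ACCESS;
        req.u.mem_access.gfn = gpa >> PAGE_SHIFT;
        req.u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
        if ( npfec.gla_valid )
        {
            req.u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
            req.u.mem_access.gla = gla;
        }

        /* monitor_traps fills in the common fields, pauses the vCPU
         * for sync events and puts the request on the monitor ring. */
        return monitor_traps(current, sync, &req);
    }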

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/arm/p2m.c                | 69 +++++++++++++++++++--------------------
 xen/arch/x86/hvm/hvm.c            | 16 ++++++---
 xen/arch/x86/hvm/monitor.c        |  6 ++++
 xen/arch/x86/mm/p2m.c             | 24 ++------------
 xen/common/mem_access.c           | 11 -------
 xen/include/asm-x86/hvm/monitor.h |  2 ++
 xen/include/asm-x86/p2m.h         | 13 +++++---
 xen/include/xen/mem_access.h      |  7 ----
 8 files changed, 63 insertions(+), 85 deletions(-)
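
For reviewers, the resulting x86 call flow is roughly the following (a
condensed sketch of the hvm.c hunks below, not a standalone excerpt):

    /* In hvm_hap_nested_page_fault(): */
    sync = p2m_mem_access_check(gpa, gla, npfec, &req_ptr);
    /* ... */
    if ( req_ptr )
    {
        /* hvm_monitor_mem_access() is a thin wrapper around
         * monitor_traps(); a negative return means the event could not
         * be placed on the ring, so rc = 0 lets the caller crash the
         * domain. */
        if ( hvm_monitor_mem_access(curr, sync, req_ptr) < 0 )
            rc = 0;
        xfree(req_ptr);
    }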

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index d82349c..df898a3 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -5,7 +5,7 @@
 #include <xen/domain_page.h>
 #include <xen/bitops.h>
 #include <xen/vm_event.h>
-#include <xen/mem_access.h>
+#include <xen/monitor.h>
 #include <xen/iocap.h>
 #include <public/vm_event.h>
 #include <asm/flushtlb.h>
@@ -1642,12 +1642,41 @@ void __init setup_virt_paging(void)
     smp_call_function(setup_virt_paging_one, (void *)val, 1);
 }
 
+static int
+__p2m_mem_access_send_req(paddr_t gpa, vaddr_t gla, const struct npfec npfec,
+                          xenmem_access_t xma)
+{
+    struct vcpu *v = current;
+    vm_event_request_t req = {};
+    bool_t sync = (xma != XENMEM_access_n2rwx);
+
+    req.reason = VM_EVENT_REASON_MEM_ACCESS;
+
+    /* Send request to mem access subscriber */
+    req.u.mem_access.gfn = gpa >> PAGE_SHIFT;
+    req.u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
+    if ( npfec.gla_valid )
+    {
+        req.u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
+        req.u.mem_access.gla = gla;
+
+        if ( npfec.kind == npfec_kind_with_gla )
+            req.u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
+        else if ( npfec.kind == npfec_kind_in_gpt )
+            req.u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
+    }
+    req.u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
+    req.u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
+    req.u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
+
+    return monitor_traps(v, sync, &req);
+}
+
 bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
 {
     int rc;
     bool_t violation;
     xenmem_access_t xma;
-    vm_event_request_t *req;
     struct vcpu *v = current;
     struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
 
@@ -1734,40 +1763,8 @@ bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
         return false;
     }
 
-    req = xzalloc(vm_event_request_t);
-    if ( req )
-    {
-        req->reason = VM_EVENT_REASON_MEM_ACCESS;
-
-        /* Pause the current VCPU */
-        if ( xma != XENMEM_access_n2rwx )
-            req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
-
-        /* Send request to mem access subscriber */
-        req->u.mem_access.gfn = gpa >> PAGE_SHIFT;
-        req->u.mem_access.offset =  gpa & ((1 << PAGE_SHIFT) - 1);
-        if ( npfec.gla_valid )
-        {
-            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
-            req->u.mem_access.gla = gla;
-
-            if ( npfec.kind == npfec_kind_with_gla )
-                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
-            else if ( npfec.kind == npfec_kind_in_gpt )
-                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
-        }
-        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
-        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
-        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
-        req->vcpu_id = v->vcpu_id;
-
-        mem_access_send_req(v->domain, req);
-        xfree(req);
-    }
-
-    /* Pause the current VCPU */
-    if ( xma != XENMEM_access_n2rwx )
-        vm_event_vcpu_pause(v);
+    if ( __p2m_mem_access_send_req(gpa, gla, npfec, xma) < 0 )
+        domain_crash(v->domain);
 
     return false;
 }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index daaee1d..688370d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1707,7 +1707,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     int rc, fall_through = 0, paged = 0;
     int sharing_enomem = 0;
     vm_event_request_t *req_ptr = NULL;
-    bool_t ap2m_active;
+    bool_t ap2m_active, sync = 0;
 
     /* On Nested Virtualization, walk the guest page table.
      * If this succeeds, all is fine.
@@ -1846,11 +1846,12 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
                 }
             }
 
-            if ( p2m_mem_access_check(gpa, gla, npfec, &req_ptr) )
-            {
+            sync = p2m_mem_access_check(gpa, gla, npfec, &req_ptr);
+
+            if ( !sync ) {
                 fall_through = 1;
             } else {
-                /* Rights not promoted, vcpu paused, work here is done */
+                /* Rights not promoted (i.e. a sync event), work here is done */
                 rc = 1;
                 goto out_put_gfn;
             }
@@ -1956,7 +1957,12 @@ out:
     }
     if ( req_ptr )
     {
-        mem_access_send_req(currd, req_ptr);
+        if ( hvm_monitor_mem_access(curr, sync, req_ptr) < 0 )
+        {
+            /* Crash the domain: returning 0 makes the caller do so */
+            rc = 0;
+        }
+
         xfree(req_ptr);
     }
     return rc;
diff --git a/xen/arch/x86/hvm/monitor.c b/xen/arch/x86/hvm/monitor.c
index 7277c12..c7285c6 100644
--- a/xen/arch/x86/hvm/monitor.c
+++ b/xen/arch/x86/hvm/monitor.c
@@ -152,6 +152,12 @@ int hvm_monitor_cpuid(unsigned long insn_length)
     return monitor_traps(curr, 1, &req);
 }
 
+int hvm_monitor_mem_access(struct vcpu *v, bool_t sync,
+                           vm_event_request_t *req)
+{
+    return monitor_traps(v, sync, req);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 812dbf6..27f9d26 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1728,13 +1728,8 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
     if ( req )
     {
         *req_ptr = req;
-        req->reason = VM_EVENT_REASON_MEM_ACCESS;
-
-        /* Pause the current VCPU */
-        if ( p2ma != p2m_access_n2rwx )
-            req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
 
-        /* Send request to mem event */
+        req->reason = VM_EVENT_REASON_MEM_ACCESS;
         req->u.mem_access.gfn = gfn;
         req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
         if ( npfec.gla_valid )
@@ -1750,23 +1745,10 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
         req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
         req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
         req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
-        req->vcpu_id = v->vcpu_id;
-
-        vm_event_fill_regs(req);
-
-        if ( altp2m_active(v->domain) )
-        {
-            req->flags |= VM_EVENT_FLAG_ALTERNATE_P2M;
-            req->altp2m_idx = vcpu_altp2m(v).p2midx;
-        }
     }
 
-    /* Pause the current VCPU */
-    if ( p2ma != p2m_access_n2rwx )
-        vm_event_vcpu_pause(v);
-
-    /* VCPU may be paused, return whether we promoted automatically */
-    return (p2ma == p2m_access_n2rwx);
+    /* Return whether a vCPU pause is required (i.e. a sync event) */
+    return (p2ma != p2m_access_n2rwx);
 }
 
 static inline
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index b4033f0..82f4bad 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -108,17 +108,6 @@ int mem_access_memop(unsigned long cmd,
     return rc;
 }
 
-int mem_access_send_req(struct domain *d, vm_event_request_t *req)
-{
-    int rc = vm_event_claim_slot(d, &d->vm_event->monitor);
-    if ( rc < 0 )
-        return rc;
-
-    vm_event_put_request(d, &d->vm_event->monitor, req);
-
-    return 0;
-}
-
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/hvm/monitor.h b/xen/include/asm-x86/hvm/monitor.h
index a92f3fc..52c1f47 100644
--- a/xen/include/asm-x86/hvm/monitor.h
+++ b/xen/include/asm-x86/hvm/monitor.h
@@ -41,6 +41,8 @@ void hvm_monitor_msr(unsigned int msr, uint64_t value);
 int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type,
                       unsigned long trap_type, unsigned long insn_length);
 int hvm_monitor_cpuid(unsigned long insn_length);
+int hvm_monitor_mem_access(struct vcpu *v, bool_t sync,
+                           vm_event_request_t *req);
 
 #endif /* __ASM_X86_HVM_MONITOR_H__ */
 
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 194020e..f4a746f 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -660,11 +660,14 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer);
 /* Resume normal operation (in case a domain was paused) */
 void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp);
 
-/* Send mem event based on the access (gla is -1ull if not available).  Handles
- * the rw2rx conversion. Boolean return value indicates if access rights have
- * been promoted with no underlying vcpu pause. If the req_ptr has been populated,
- * then the caller must put the event in the ring (once having released get_gfn*
- * locks -- caller must also xfree the request. */
+/*
+ * Set up a vm_event request based on the access (gla is -1ull if not
+ * available). Handles the rw2rx conversion. The boolean return value
+ * indicates whether the event is synchronous (i.e. requires a vCPU pause).
+ * If req_ptr has been populated, the caller should use monitor_traps to
+ * send the event on the MONITOR ring. Once having released the get_gfn*
+ * locks, the caller must also xfree the request.
+ */
 bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
                             struct npfec npfec,
                             vm_event_request_t **req_ptr);
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
index 272f1e4..3d054e0 100644
--- a/xen/include/xen/mem_access.h
+++ b/xen/include/xen/mem_access.h
@@ -29,7 +29,6 @@
 
 int mem_access_memop(unsigned long cmd,
                      XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
-int mem_access_send_req(struct domain *d, vm_event_request_t *req);
 
 static inline
 void mem_access_resume(struct vcpu *v, vm_event_response_t *rsp)
@@ -47,12 +46,6 @@ int mem_access_memop(unsigned long cmd,
 }
 
 static inline
-int mem_access_send_req(struct domain *d, vm_event_request_t *req)
-{
-    return -ENOSYS;
-}
-
-static inline
 void mem_access_resume(struct vcpu *vcpu, vm_event_response_t *rsp)
 {
     /* Nothing to do. */
-- 
2.8.1

