
[Xen-changelog] [xen-unstable] x86/mm: When mem event automatically promotes access rights, let other subsystems know



# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1323206216 0
# Node ID f24c664557e5e834140833927a5d3f2bab7ca270
# Parent  537ceb11d51ef60cd4abffd2f54de0ae0ca50008
x86/mm: When mem event automatically promotes access rights, let other subsystems know

The mem event fault handler in the p2m can automatically promote the access
rights of a p2m entry. In those scenarios, vcpus are not paused and will
immediately retry the faulting instruction. This will generate a second fault
if the underlying entry type requires one (paging, unsharing, PoD, etc).
Collapse the two faults into a single one.
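
For illustration only (not part of the patch): a minimal standalone C sketch
of the fall-through pattern introduced here. mem_access_check(),
listener_present and fault_handler() are invented stand-ins for
p2m_mem_access_check() and the hvm.c fault handler changed below.

#include <stdio.h>

/* Stand-in for p2m_mem_access_check(): returns 1 when it promoted the
 * access rights itself (no listener, vcpu keeps running), 0 when it
 * queued a mem event and paused the vcpu. */
static int listener_present;    /* invented knob: listener attached? */

static int mem_access_check(void)
{
    if ( !listener_present )
        return 1;   /* rights promoted in place; vcpu retries at once */
    return 0;       /* request sent; vcpu paused until listener resumes it */
}

/* Stand-in for the fault handler changed in hvm.c below. */
static int fault_handler(void)
{
    int rc, fall_through = 0;

    if ( mem_access_check() )
        fall_through = 1;   /* keep going: the retry may still need
                             * paging/unsharing/PoD work */
    else
        return 1;           /* vcpu paused; work here is done */

    /* ... handling of the other p2m entry types would run here ... */

    /* Returning 1 marks the fault as handled, so the immediate retry
     * (and its possible second fault) is normal, not an error. */
    rc = fall_through;
    return rc;
}

int main(void)
{
    listener_present = 0;
    printf("no listener: rc=%d (fall through, retry expected)\n",
           fault_handler());
    listener_present = 1;
    printf("listener:    rc=%d (vcpu paused)\n", fault_handler());
    return 0;
}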

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---


diff -r 537ceb11d51e -r f24c664557e5 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Dec 06 20:31:49 2011 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Tue Dec 06 21:16:56 2011 +0000
@@ -1205,7 +1205,7 @@
     mfn_t mfn;
     struct vcpu *v = current;
     struct p2m_domain *p2m;
-    int rc;
+    int rc, fall_through = 0;
 
     /* On Nested Virtualization, walk the guest page table.
      * If this succeeds, all is fine.
@@ -1278,9 +1278,15 @@
 
         if ( violation )
         {
-            p2m_mem_access_check(gpa, gla_valid, gla, access_r, access_w, access_x);
-            rc = 1;
-            goto out_put_gfn;
+            if ( p2m_mem_access_check(gpa, gla_valid, gla, access_r, 
+                                        access_w, access_x) )
+            {
+                fall_through = 1;
+            } else {
+                /* Rights not promoted, vcpu paused, work here is done */
+                rc = 1;
+                goto out_put_gfn;
+            }
         }
     }
 
@@ -1339,7 +1345,11 @@
         goto out_put_gfn;
     }
 
-    rc = 0;
+    /* If we fell through, the vcpu will retry now that access restrictions have
+     * been removed. It may fault again if the p2m entry type still requires so.
+     * Otherwise, this is an error condition. */
+    rc = fall_through;
+
 out_put_gfn:
     put_gfn(p2m->domain, gfn);
     return rc;
diff -r 537ceb11d51e -r f24c664557e5 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Tue Dec 06 20:31:49 2011 +0000
+++ b/xen/arch/x86/mm/p2m.c     Tue Dec 06 21:16:56 2011 +0000
@@ -1084,7 +1084,7 @@
     mem_event_unpause_vcpus(d);
 }
 
-void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
+bool_t p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
                           bool_t access_r, bool_t access_w, bool_t access_x)
 {
     struct vcpu *v = current;
@@ -1105,7 +1105,7 @@
     {
         p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
         p2m_unlock(p2m);
-        return;
+        return 1;
     }
     p2m_unlock(p2m);
 
@@ -1128,12 +1128,13 @@
             p2m_lock(p2m);
             p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
             p2m_unlock(p2m);
+            return 1;
         }
 
-        return;
+        return 0;
     }
     else if ( res > 0 )
-        return;  /* No space in buffer; VCPU paused */
+        return 0;  /* No space in buffer; VCPU paused */
 
     memset(&req, 0, sizeof(req));
     req.type = MEM_EVENT_TYPE_ACCESS;
@@ -1157,6 +1158,7 @@
     mem_event_put_request(d, &d->mem_event->access, &req);
 
     /* VCPU paused, mem event request sent */
+    return 0;
 }
 
 void p2m_mem_access_resume(struct domain *d)
diff -r 537ceb11d51e -r f24c664557e5 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Tue Dec 06 20:31:49 2011 +0000
+++ b/xen/include/asm-x86/p2m.h Tue Dec 06 21:16:56 2011 +0000
@@ -491,8 +491,9 @@
 
 #ifdef __x86_64__
 /* Send mem event based on the access (gla is -1ull if not available).  Handles
- * the rw2rx conversion */
-void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
+ * the rw2rx conversion. Boolean return value indicates if access rights have 
+ * been promoted with no underlying vcpu pause. */
+bool_t p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
                           bool_t access_r, bool_t access_w, bool_t access_x);
 /* Resumes the running of the VCPU, restarting the last instruction */
 void p2m_mem_access_resume(struct domain *d);
@@ -508,10 +509,10 @@
                        hvmmem_access_t *access);
 
 #else
-static inline void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, 
+static inline bool_t p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, 
                                         unsigned long gla, bool_t access_r, 
                                         bool_t access_w, bool_t access_x)
-{ }
+{ return 1; }
 static inline int p2m_set_mem_access(struct domain *d, 
                                      unsigned long start_pfn, 
                                      uint32_t nr, hvmmem_access_t access)
