
[Xen-devel] [PATCH v2 07/20] x86/mem_sharing: don't try to unshare twice during page fault



An unshare of the page was already attempted in get_gfn_type_access (sketched
below). If that attempt failed, trying again here is pointless. Don't try to
send a vm_event again either; simply check whether a ring is present.
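
For reference, a minimal, hypothetical sketch of the lookup path this relies
on, loosely based on __get_gfn_type_access() in xen/arch/x86/mm/p2m.c (names
and exact signatures are assumptions and may differ between trees): the
unshare attempt and the ENOMEM notification already happen during the gfn
lookup, so a type that is still shared afterwards means the unshare ran out
of memory.

    /*
     * Simplified sketch (not the literal tree code) of the unshare attempt
     * that already happens during the gfn lookup, i.e. before
     * hvm_hap_nested_page_fault() inspects the p2m type.
     */
    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, q, &page_order, NULL);

    if ( (q & P2M_UNSHARE) && p2m_is_shared(p2mt) )
    {
        /* Try to unshare; on failure notify the helper via vm_event. */
        if ( mem_sharing_unshare_page(p2m->domain, gfn_x(gfn)) < 0 )
            mem_sharing_notify_enomem(p2m->domain, gfn_x(gfn), false);

        /* Re-read the entry: a still-shared type means ENOMEM. */
        mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, q, &page_order, NULL);
    }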

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e055114922..8f90841813 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -38,6 +38,7 @@
 #include <xen/warning.h>
 #include <xen/vpci.h>
 #include <xen/nospec.h>
+#include <xen/vm_event.h>
 #include <asm/shadow.h>
 #include <asm/hap.h>
 #include <asm/current.h>
@@ -1706,11 +1707,14 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     struct domain *currd = curr->domain;
     struct p2m_domain *p2m, *hostp2m;
     int rc, fall_through = 0, paged = 0;
-    int sharing_enomem = 0;
     vm_event_request_t *req_ptr = NULL;
     bool sync = false;
     unsigned int page_order;
 
+#ifdef CONFIG_MEM_SHARING
+    bool sharing_enomem = false;
+#endif
+
     /* On Nested Virtualization, walk the guest page table.
      * If this succeeds, all is fine.
      * If this fails, inject a nested page fault into the guest.
@@ -1898,14 +1902,16 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
         paged = 1;
 
-    /* Mem sharing: unshare the page and try again */
-    if ( npfec.write_access && (p2mt == p2m_ram_shared) )
+#ifdef CONFIG_MEM_SHARING
+    /* Mem sharing: if still shared on write access then it's ENOMEM */
+    if ( npfec.write_access && p2m_is_shared(p2mt) )
     {
         ASSERT(p2m_is_hostp2m(p2m));
-        sharing_enomem = mem_sharing_unshare_page(currd, gfn);
+        sharing_enomem = true;
         rc = 1;
         goto out_put_gfn;
     }
+#endif
 
     /* Spurious fault? PoD and log-dirty also take this path. */
     if ( p2m_is_ram(p2mt) )
@@ -1959,19 +1965,21 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
      */
     if ( paged )
         p2m_mem_paging_populate(currd, gfn);
+
+#ifdef CONFIG_MEM_SHARING
     if ( sharing_enomem )
     {
-        int rv;
-
-        if ( (rv = mem_sharing_notify_enomem(currd, gfn, true)) < 0 )
+        if ( !vm_event_check_ring(currd->vm_event_share) )
         {
             gdprintk(XENLOG_ERR, "Domain %hu attempt to unshare "
-                     "gfn %lx, ENOMEM and no helper (rc %d)\n",
-                     currd->domain_id, gfn, rv);
+                     "gfn %lx, ENOMEM and no helper\n",
+                     currd->domain_id, gfn);
             /* Crash the domain */
             rc = 0;
         }
     }
+#endif
+
     if ( req_ptr )
     {
         if ( monitor_traps(curr, sync, req_ptr) < 0 )
-- 
2.20.1

