[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v2 19/20] x86/mem_sharing: reset a fork



Implement hypercall that allows a fork to shed all memory that got allocated
for it during its execution and re-load its vCPU context from the parent VM.
This allows resetting the forked VM into the same state the parent VM is in
faster than creating a new fork would be. Measurements show about a 2x
speedup during normal fuzzing operations. Performance may vary depending on how
much memory got allocated for the forked VM. If it has been completely
deduplicated from the parent VM then creating a new fork would likely be more
performant.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxx>
---
 xen/arch/x86/mm/mem_sharing.c | 105 ++++++++++++++++++++++++++++++++++
 xen/include/public/memory.h   |   1 +
 2 files changed, 106 insertions(+)

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index e93ad2ec5a..4735a334b9 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1622,6 +1622,87 @@ static int mem_sharing_fork(struct domain *d, struct 
domain *cd)
     return 0;
 }
 
+/*
+ * Node of a singly-linked list gathering the pages/gfns to drop after the
+ * page_list walk completes (pages can't be freed while iterating over the
+ * domain's page_list).  The redundant forward declaration was dropped: the
+ * struct tag is already in scope for the self-referencing 'next' member.
+ */
+struct gfn_free {
+    struct gfn_free *next;  /* next node, or NULL at the list tail */
+    struct page_info *page; /* page with an extra ref held via get_page() */
+    gfn_t gfn;              /* guest frame the page is mapped at */
+};
+
+/*
+ * Reset fork @cd: drop all RAM pages that were populated for it since the
+ * fork and re-load its vCPU context from the parent @d via fork_hvm().
+ *
+ * Returns 0 on success.  On failure while collecting pages, all references
+ * taken so far are dropped and -EBUSY (page vanished under us) or -ENOMEM
+ * (allocation failure) is propagated; otherwise the fork_hvm() result is
+ * returned.  The previous version returned 0 from the error path, silently
+ * reporting success without having reset anything.
+ */
+static int mem_sharing_fork_reset(struct domain *d, struct domain *cd)
+{
+    int rc = 0;
+
+    struct p2m_domain *p2m = p2m_get_hostp2m(cd);
+    struct gfn_free *list = NULL;
+    struct page_info *page;
+
+    page_list_for_each(page, &cd->page_list)
+    {
+        mfn_t mfn = page_to_mfn(page);
+        if ( mfn_valid(mfn) )
+        {
+            p2m_type_t p2mt;
+            p2m_access_t p2ma;
+            gfn_t gfn = mfn_to_gfn(cd, mfn);
+            mfn = __get_gfn_type_access(p2m, gfn_x(gfn), &p2mt, &p2ma,
+                                        0, NULL, false);
+            if ( p2m_is_ram(p2mt) )
+            {
+                struct gfn_free *gfn_free;
+
+                /* Page is disappearing under us - undo and report it. */
+                if ( !get_page(page, cd) )
+                {
+                    rc = -EBUSY;
+                    goto err_reset;
+                }
+
+                /*
+                 * We can't free the page while iterating over the page_list
+                 * so we build a separate list to loop over.
+                 *
+                 * We want to iterate over the page_list instead of checking
+                 * gfn from 0 to max_gfn because this is ~10x faster.
+                 */
+                gfn_free = xmalloc(struct gfn_free);
+                if ( !gfn_free )
+                {
+                    rc = -ENOMEM;
+                    goto err_reset;
+                }
+
+                gfn_free->gfn = gfn;
+                gfn_free->page = page;
+                gfn_free->next = list;
+                list = gfn_free;
+            }
+        }
+    }
+
+    while ( list )
+    {
+        struct gfn_free *next = list->next;
+
+        /* Invalidate the p2m entry, then drop both references on the page. */
+        rc = p2m->set_entry(p2m, list->gfn, INVALID_MFN, PAGE_ORDER_4K,
+                            p2m_invalid, p2m_access_rwx, -1);
+        put_page_alloc_ref(list->page);
+        put_page(list->page);
+
+        xfree(list);
+        list = next;
+
+        ASSERT(!rc);
+    }
+
+    /* All fork memory dropped - re-load vCPU context from the parent. */
+    return fork_hvm(d, cd);
+
+ err_reset:
+    /* Drop the extra references taken above and free the pending list. */
+    while ( list )
+    {
+        struct gfn_free *next = list->next;
+
+        put_page(list->page);
+        xfree(list);
+        list = next;
+    }
+
+    /* Propagate the failure instead of silently returning success. */
+    return rc;
+}
+
 int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
 {
     int rc;
@@ -1905,6 +1986,30 @@ int 
mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
             rcu_unlock_domain(pd);
             break;
         }
+
+        case XENMEM_sharing_op_fork_reset:
+        {
+            struct domain *pd;
+
+            /* Padding fields must be zero for forward compatibility. */
+            rc = -EINVAL;
+            if ( mso.u.fork._pad[0] || mso.u.fork._pad[1] ||
+                 mso.u.fork._pad[2] )
+                goto out;
+
+            /*
+             * Only a domain that actually is a fork can be reset.  -EINVAL,
+             * not -ENOSYS: the op is implemented, the target is invalid.
+             */
+            rc = -EINVAL;
+            if ( !d->parent )
+                goto out;
+
+            rc = rcu_lock_live_remote_domain_by_id(d->parent->domain_id, &pd);
+            if ( rc )
+                goto out;
+
+            rc = mem_sharing_fork_reset(pd, d);
+
+            rcu_unlock_domain(pd);
+            break;
+        }
+
         default:
             rc = -ENOSYS;
             break;
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 90a3f4498e..e3d063e22e 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -483,6 +483,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
 #define XENMEM_sharing_op_audit             7
 #define XENMEM_sharing_op_range_share       8
 #define XENMEM_sharing_op_fork              9
+#define XENMEM_sharing_op_fork_reset        10
 
 #define XENMEM_SHARING_OP_S_HANDLE_INVALID  (-10)
 #define XENMEM_SHARING_OP_C_HANDLE_INVALID  (-9)
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.