
[Xen-devel] [PATCH] [GSOC14] Refactor mempaging code from xenpaging to libxc.



This patch is part of the work done under the GSoC project
Lazy Restore Using Memory Paging.

This patch moves the mempaging initialization code from xenpaging into libxc.
The code refactored out of xenpaging is the code which enables paging and
sets up the shared ring and event channel used to communicate between the
hypervisor and the dom0 tool acting as the pager. xenpaging is changed to
use the new routine and has been tested to build and work correctly with it.

The refactoring is done so that any tool which acts as a pager, whether for
lazy restore or for memory paging in general, can use the same routine to
initialize mempaging; a usage sketch follows below. This also allows any
future (in-tree) tools to use mempaging.
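
For illustration, here is a minimal sketch (not part of the patch) of how a
pager tool might call the new routine. pager_init() and the event-loop
comment are hypothetical; the call matches the prototype added to
xenctrl.h below:

    /* Hypothetical caller sketch -- error handling and teardown trimmed. */
    static int pager_init(xc_interface *xch, domid_t domain_id)
    {
        void *ring_page = NULL;
        int port;
        uint32_t evtchn_port;
        xc_evtchn *xce = NULL;
        mem_event_back_ring_t back_ring;

        /* Enable paging, map the ring page and bind the event channel. */
        if ( xc_mem_paging_ring_setup(xch, domain_id, &ring_page, &port,
                                      &evtchn_port, &xce, &back_ring) )
            return -1;

        /* The pager event loop would go here: wait on xce, pull requests
         * off back_ring, and service them with e.g. xc_mem_paging_prep()
         * and xc_mem_paging_load() before responding. */
        return 0;
    }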

Signed-off-by: Dushyant Behl <myselfdushyantbehl@xxxxxxxxx>
Reviewed-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
---
 tools/libxc/Makefile                |   3 +
 tools/libxc/xc_mem_paging_setup.c   | 132 ++++++++++++++++++++++++++++++++++++
 tools/libxc/xenctrl.h               |  12 ++++
 tools/ocaml/libs/xc/xenctrl_stubs.c |  11 +--
 tools/xenpaging/Makefile            |   4 +-
 tools/xenpaging/xenpaging.c         |  93 +++----------------------
 6 files changed, 167 insertions(+), 88 deletions(-)
 create mode 100644 tools/libxc/xc_mem_paging_setup.c

diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index a74b19e..2439853 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -69,6 +69,9 @@ GUEST_SRCS-$(CONFIG_ARM)     += xc_dom_armzimageloader.c
 GUEST_SRCS-y                 += xc_dom_binloader.c
 GUEST_SRCS-y                 += xc_dom_compat_linux.c
 
+# mem paging setup
+GUEST_SRCS-y                 += xc_mem_paging_setup.c
+
 GUEST_SRCS-$(CONFIG_X86)     += xc_dom_x86.c
 GUEST_SRCS-$(CONFIG_X86)     += xc_cpuid_x86.c
 GUEST_SRCS-$(CONFIG_X86)     += xc_hvm_build_x86.c
diff --git a/tools/libxc/xc_mem_paging_setup.c b/tools/libxc/xc_mem_paging_setup.c
new file mode 100644
index 0000000..125d203
--- /dev/null
+++ b/tools/libxc/xc_mem_paging_setup.c
@@ -0,0 +1,132 @@
+/*
+ * tools/libxc/xc_mem_paging_setup.c
+ *
+ * Routines to initialize memory paging. Create shared ring
+ * and event channels to communicate with the hypervisor.
+ *
+ * Copyright (c) 2014 Dushyant Behl
+ * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
+ * 
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
+ */
+
+#include "xc_private.h"
+#include <xen/event_channel.h>
+#include <xen/mem_event.h>
+
+/*
+ * Mem paging ring and event channel setup routine.
+ * Set up a shared ring and an event channel to communicate between
+ * the hypervisor and the tool performing mem paging operations.
+ */
+int xc_mem_paging_ring_setup(xc_interface *xch, domid_t domain_id,
+                                void **ring_page, int *port,
+                                uint32_t *evtchn_port,
+                                xc_evtchn **xce_handle,
+                                mem_event_back_ring_t *back_ring)
+{
+    int rc;
+    unsigned long ring_pfn, mmap_pfn;
+
+    /* Map the ring page */
+    xc_get_hvm_param(xch, domain_id, HVM_PARAM_PAGING_RING_PFN, &ring_pfn);
+    mmap_pfn = ring_pfn;
+    *ring_page = xc_map_foreign_batch(xch, domain_id,
+                                        PROT_READ | PROT_WRITE, &mmap_pfn, 1);
+    if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
+    {
+        /* Map failed, populate ring page */
+        rc = xc_domain_populate_physmap_exact(xch, domain_id,
+                                              1, 0, 0, &ring_pfn);
+        if ( rc != 0 )
+        {
+            PERROR("Failed to populate ring gfn\n");
+            return 1;
+        }
+
+        mmap_pfn = ring_pfn;
+        *ring_page = xc_map_foreign_batch(xch, domain_id,
+                                        PROT_READ | PROT_WRITE, &mmap_pfn, 1);
+
+        if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
+        {
+            PERROR("Could not map the ring page\n");
+            return 1;
+        }
+    }
+
+    /* Initialise Xen */
+    rc = xc_mem_paging_enable(xch, domain_id, evtchn_port);
+    if ( rc != 0 )
+    {
+        switch ( errno ) {
+            case EBUSY:
+                ERROR("mempaging is (or was) active on this domain");
+                break;
+            case ENODEV:
+                ERROR("mempaging requires Hardware Assisted Paging");
+                break;
+            case EMLINK:
+                ERROR("mempaging not supported while iommu passthrough is enabled");
+                break;
+            case EXDEV:
+                ERROR("mempaging not supported in a PoD guest");
+                break;
+            default:
+                PERROR("mempaging: error initialising shared page");
+                break;
+        }
+        return 1;
+    }
+
+    /* Open event channel */
+    *xce_handle = xc_evtchn_open(NULL, 0);
+    if ( *xce_handle == NULL )
+    {
+        PERROR("Failed to open event channel");
+        return 1;
+    }
+
+    /* Bind event notification */
+    rc = xc_evtchn_bind_interdomain(*xce_handle, domain_id, *evtchn_port);
+    if ( rc < 0 )
+    {
+        PERROR("Failed to bind event channel");
+        return 1;
+    }
+    *port = rc;
+
+    /* Initialise ring */
+    SHARED_RING_INIT((mem_event_sring_t *)*ring_page);
+    BACK_RING_INIT(back_ring, (mem_event_sring_t *)*ring_page, PAGE_SIZE);
+
+    /* Now that the ring is set, remove it from the guest's physmap */
+    if ( xc_domain_decrease_reservation_exact(xch, domain_id, 1, 0, &ring_pfn) )
+    {
+        PERROR("Failed to remove ring from guest physmap");
+        return 1;
+    }
+
+    return 0;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 02129f7..c871a2f 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -47,6 +47,7 @@
 #include <xen/xsm/flask_op.h>
 #include <xen/tmem.h>
 #include <xen/kexec.h>
+#include <xen/mem_event.h>
 
 #include "xentoollog.h"
 
@@ -2039,6 +2040,17 @@ int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, unsigned long gfn);
 int xc_mem_paging_load(xc_interface *xch, domid_t domain_id, 
                         unsigned long gfn, void *buffer);
 
+/*
+ * Mem paging ring and event channel setup routine.
+ * Set up a shared ring and an event channel to communicate between
+ * the hypervisor and the tool performing mem paging operations.
+ */
+int xc_mem_paging_ring_setup(xc_interface *xch, domid_t domain_id,
+                                void **ring_page, int *port,
+                                uint32_t *evtchn_port,
+                                xc_evtchn **xce_handle,
+                                mem_event_back_ring_t *back_ring);
+
 /** 
  * Access tracking operations.
  * Supported only on Intel EPT 64 bit processors.
diff --git a/tools/ocaml/libs/xc/xenctrl_stubs.c b/tools/ocaml/libs/xc/xenctrl_stubs.c
index ff29b47..37a4db7 100644
--- a/tools/ocaml/libs/xc/xenctrl_stubs.c
+++ b/tools/ocaml/libs/xc/xenctrl_stubs.c
@@ -521,13 +521,16 @@ CAMLprim value stub_xc_evtchn_reset(value xch, value domid)
        CAMLreturn(Val_unit);
 }
 
-
-#define RING_SIZE 32768
-static char ring[RING_SIZE];
+/*
+ * The macro is renamed to BUF_RING_SIZE because the name RING_SIZE
+ * collides with the Xen shared ring definitions in io/ring.h.
+ */
+#define BUF_RING_SIZE 32768
+static char ring[BUF_RING_SIZE];
 
 CAMLprim value stub_xc_readconsolering(value xch)
 {
-       unsigned int size = RING_SIZE - 1;
+       unsigned int size = BUF_RING_SIZE - 1;
        char *ring_ptr = ring;
        int retval;
 
diff --git a/tools/xenpaging/Makefile b/tools/xenpaging/Makefile
index 548d9dd..ea370d3 100644
--- a/tools/xenpaging/Makefile
+++ b/tools/xenpaging/Makefile
@@ -1,8 +1,8 @@
 XEN_ROOT=$(CURDIR)/../..
 include $(XEN_ROOT)/tools/Rules.mk
 
-CFLAGS += $(CFLAGS_libxenctrl) $(CFLAGS_libxenstore) $(PTHREAD_CFLAGS)
-LDLIBS += $(LDLIBS_libxenctrl) $(LDLIBS_libxenstore) $(PTHREAD_LIBS)
+CFLAGS += $(CFLAGS_libxenctrl) $(CFLAGS_libxenguest) $(CFLAGS_libxenstore) $(PTHREAD_CFLAGS)
+LDLIBS += $(LDLIBS_libxenctrl) $(LDLIBS_libxenguest) $(LDLIBS_libxenstore) $(PTHREAD_LIBS)
 LDFLAGS += $(PTHREAD_LDFLAGS)
 
 POLICY    = default
diff --git a/tools/xenpaging/xenpaging.c b/tools/xenpaging/xenpaging.c
index 5ef2f09..2b81408 100644
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -281,7 +281,6 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
     xentoollog_logger *dbg = NULL;
     char *p;
     int rc;
-    unsigned long ring_pfn, mmap_pfn;
 
     /* Allocate memory */
     paging = calloc(1, sizeof(struct xenpaging));
@@ -338,91 +337,21 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
         goto err;
     }
 
-    /* Map the ring page */
-    xc_get_hvm_param(xch, paging->mem_event.domain_id, 
-                        HVM_PARAM_PAGING_RING_PFN, &ring_pfn);
-    mmap_pfn = ring_pfn;
-    paging->mem_event.ring_page = 
-        xc_map_foreign_batch(xch, paging->mem_event.domain_id, 
-                                PROT_READ | PROT_WRITE, &mmap_pfn, 1);
-    if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
-    {
-        /* Map failed, populate ring page */
-        rc = xc_domain_populate_physmap_exact(paging->xc_handle, 
-                                              paging->mem_event.domain_id,
-                                              1, 0, 0, &ring_pfn);
-        if ( rc != 0 )
-        {
-            PERROR("Failed to populate ring gfn\n");
-            goto err;
-        }
-
-        mmap_pfn = ring_pfn;
-        paging->mem_event.ring_page = 
-            xc_map_foreign_batch(xch, paging->mem_event.domain_id, 
-                                    PROT_READ | PROT_WRITE, &mmap_pfn, 1);
-        if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
-        {
-            PERROR("Could not map the ring page\n");
-            goto err;
-        }
-    }
-    
-    /* Initialise Xen */
-    rc = xc_mem_paging_enable(xch, paging->mem_event.domain_id,
-                             &paging->mem_event.evtchn_port);
-    if ( rc != 0 )
-    {
-        switch ( errno ) {
-            case EBUSY:
-                ERROR("xenpaging is (or was) active on this domain");
-                break;
-            case ENODEV:
-                ERROR("xenpaging requires Hardware Assisted Paging");
-                break;
-            case EMLINK:
-                ERROR("xenpaging not supported while iommu passthrough is 
enabled");
-                break;
-            case EXDEV:
-                ERROR("xenpaging not supported in a PoD guest");
-                break;
-            default:
-                PERROR("Error initialising shared page");
-                break;
-        }
-        goto err;
-    }
-
-    /* Open event channel */
-    paging->mem_event.xce_handle = xc_evtchn_open(NULL, 0);
-    if ( paging->mem_event.xce_handle == NULL )
-    {
-        PERROR("Failed to open event channel");
-        goto err;
-    }
-
-    /* Bind event notification */
-    rc = xc_evtchn_bind_interdomain(paging->mem_event.xce_handle,
+    /* Initialize paging by setting up ring and event channel. */
+    rc = xc_mem_paging_ring_setup(paging->xc_handle,
                                     paging->mem_event.domain_id,
-                                    paging->mem_event.evtchn_port);
-    if ( rc < 0 )
-    {
-        PERROR("Failed to bind event channel");
+                                    &paging->mem_event.ring_page,
+                                    &paging->mem_event.port,
+                                    &paging->mem_event.evtchn_port,
+                                    &paging->mem_event.xce_handle,
+                                    &paging->mem_event.back_ring);
+    if ( rc )
+    {
+        PERROR("Could not initialize mem paging\n");
         goto err;
     }
 
-    paging->mem_event.port = rc;
-
-    /* Initialise ring */
-    SHARED_RING_INIT((mem_event_sring_t *)paging->mem_event.ring_page);
-    BACK_RING_INIT(&paging->mem_event.back_ring,
-                   (mem_event_sring_t *)paging->mem_event.ring_page,
-                   PAGE_SIZE);
-
-    /* Now that the ring is set, remove it from the guest's physmap */
-    if ( xc_domain_decrease_reservation_exact(xch, 
-                    paging->mem_event.domain_id, 1, 0, &ring_pfn) )
-        PERROR("Failed to remove ring from guest physmap");
+    DPRINTF("ring and event channel setup successful\n");
 
     /* Get max_pages from guest if not provided via cmdline */
     if ( !paging->max_pages )
-- 
1.9.1

