
[Xen-devel] [PATCH v4 7/9] libxc: introduce soft reset for HVM domains



Add a new xc_domain_soft_reset() function which performs a so-called 'soft
reset' of an HVM domain. The reset is performed in the following way:
- Save the HVM context and all HVM params;
- Devour the original domain with XEN_DOMCTL_devour;
- Wait until the original domain dies or has no pages left;
- Restore the HVM context and HVM params, and seed the grant table.

After that, the domain resumes execution at the point where SHUTDOWN_soft_reset
was called.
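
For illustration, a minimal caller sketch; the domain ids and the backend
domids below are placeholders, and the destination domain is assumed to have
already been created by the toolstack:

    #include <stdio.h>
    #include <xenctrl.h>
    #include <xenguest.h>

    static int do_soft_reset(uint32_t old_domid, uint32_t new_domid)
    {
        xc_interface *xch;
        unsigned long store_mfn = 0, console_mfn = 0;
        int rc;

        xch = xc_interface_open(NULL, NULL, 0);
        if ( !xch )
            return -1;

        /* Domain 0 is assumed to host both xenstored and the console. */
        rc = xc_domain_soft_reset(xch, old_domid, new_domid,
                                  0 /* console_domid */, &console_mfn,
                                  0 /* store_domid */, &store_mfn);
        if ( rc )
            fprintf(stderr, "soft reset of domain %u failed\n", old_domid);
        else
            printf("domain %u reset into %u (store mfn %lx, console mfn %lx)\n",
                   old_domid, new_domid, store_mfn, console_mfn);

        xc_interface_close(xch);
        return rc;
    }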

Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
 tools/libxc/Makefile               |   1 +
 tools/libxc/include/xenguest.h     |  20 +++
 tools/libxc/xc_domain_soft_reset.c | 282 +++++++++++++++++++++++++++++++++++++
 3 files changed, 303 insertions(+)
 create mode 100644 tools/libxc/xc_domain_soft_reset.c

diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index bd2ca6c..8f8abd6 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -52,6 +52,7 @@ GUEST_SRCS-y += xc_offline_page.c xc_compression.c
 else
 GUEST_SRCS-y += xc_nomigrate.c
 endif
+GUEST_SRCS-y += xc_domain_soft_reset.c
 
 vpath %.c ../../xen/common/libelf
 CFLAGS += -I../../xen/common/libelf
diff --git a/tools/libxc/include/xenguest.h b/tools/libxc/include/xenguest.h
index 40bbac8..770cd10 100644
--- a/tools/libxc/include/xenguest.h
+++ b/tools/libxc/include/xenguest.h
@@ -131,6 +131,26 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
  * of the new domain is automatically appended to the filename,
  * separated by a ".".
  */
+
+/**
+ * This function performs a soft reset of a domain. During soft reset, all
+ * of the source domain's memory is reassigned to the destination domain,
+ * and the HVM context and HVM params are copied over.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm source_dom the id of the source domain
+ * @parm dest_dom the id of the destination domain
+ * @parm console_domid the id of the domain handling console
+ * @parm console_mfn returned with the mfn of the console page
+ * @parm store_domid the id of the domain handling store
+ * @parm store_mfn returned with the mfn of the store page
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_soft_reset(xc_interface *xch, uint32_t source_dom,
+                         uint32_t dest_dom, domid_t console_domid,
+                         unsigned long *console_mfn, domid_t store_domid,
+                         unsigned long *store_mfn);
+
 #define XC_DEVICE_MODEL_RESTORE_FILE "/var/lib/xen/qemu-resume"
 
 /**
diff --git a/tools/libxc/xc_domain_soft_reset.c b/tools/libxc/xc_domain_soft_reset.c
new file mode 100644
index 0000000..24d0b48
--- /dev/null
+++ b/tools/libxc/xc_domain_soft_reset.c
@@ -0,0 +1,282 @@
+/******************************************************************************
+ * xc_domain_soft_reset.c
+ *
+ * Do soft reset.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <inttypes.h>
+#include <time.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include "xc_private.h"
+#include "xc_core.h"
+#include "xc_bitops.h"
+#include "xc_dom.h"
+#include "xg_private.h"
+#include "xg_save_restore.h"
+
+#include <xen/hvm/params.h>
+
+#define SLEEP_INT 1
+
+int xc_domain_soft_reset(xc_interface *xch, uint32_t source_dom,
+                         uint32_t dest_dom, domid_t console_domid,
+                         unsigned long *console_mfn, domid_t store_domid,
+                         unsigned long *store_mfn)
+{
+    xc_dominfo_t old_info, new_info;
+    int rc = 1;
+
+    uint32_t hvm_buf_size = 0;
+    uint8_t *hvm_buf = NULL;
+    unsigned long console_pfn, store_pfn, io_pfn, buffio_pfn;
+    unsigned long max_gpfn;
+    uint64_t hvm_params[HVM_NR_PARAMS];
+    xen_pfn_t sharedinfo_pfn;
+
+    DPRINTF("%s: soft reset domid %u -> %u", __func__, source_dom, dest_dom);
+
+    if ( xc_domain_getinfo(xch, source_dom, 1, &old_info) != 1 )
+    {
+        PERROR("Could not get old domain info");
+        return 1;
+    }
+
+    if ( xc_domain_getinfo(xch, dest_dom, 1, &new_info) != 1 )
+    {
+        PERROR("Could not get new domain info");
+        return 1;
+    }
+
+    if ( !old_info.hvm || !new_info.hvm )
+    {
+        PERROR("Soft reset is supported for HVM only");
+        return 1;
+    }
+
+    max_gpfn = xc_domain_maximum_gpfn(xch, source_dom);
+
+    sharedinfo_pfn = old_info.shared_info_frame;
+    if ( xc_get_pfn_type_batch(xch, source_dom, 1, &sharedinfo_pfn) )
+    {
+        PERROR("xc_get_pfn_type_batch failed");
+        goto out;
+    }
+
+    hvm_buf_size = xc_domain_hvm_getcontext(xch, source_dom, 0, 0);
+    if ( hvm_buf_size == -1 )
+    {
+        PERROR("Couldn't get HVM context size from Xen");
+        goto out;
+    }
+
+    hvm_buf = malloc(hvm_buf_size);
+    if ( !hvm_buf )
+    {
+        ERROR("Couldn't allocate memory");
+        goto out;
+    }
+
+    if ( xc_domain_hvm_getcontext(xch, source_dom, hvm_buf,
+                                  hvm_buf_size) == -1 )
+    {
+        PERROR("HVM:Could not get hvm buffer");
+        goto out;
+    }
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_STORE_PFN,
+                     &hvm_params[HVM_PARAM_STORE_PFN]);
+    store_pfn = hvm_params[HVM_PARAM_STORE_PFN];
+    *store_mfn = store_pfn;
+
+    xc_hvm_param_get(xch, source_dom,
+                     HVM_PARAM_CONSOLE_PFN,
+                     &hvm_params[HVM_PARAM_CONSOLE_PFN]);
+    console_pfn = hvm_params[HVM_PARAM_CONSOLE_PFN];
+    *console_mfn = console_pfn;
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_BUFIOREQ_PFN,
+                     &hvm_params[HVM_PARAM_BUFIOREQ_PFN]);
+    buffio_pfn = hvm_params[HVM_PARAM_BUFIOREQ_PFN];
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_IOREQ_PFN,
+                     &hvm_params[HVM_PARAM_IOREQ_PFN]);
+    io_pfn = hvm_params[HVM_PARAM_IOREQ_PFN];
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_IDENT_PT,
+                     &hvm_params[HVM_PARAM_IDENT_PT]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_PAGING_RING_PFN,
+                     &hvm_params[HVM_PARAM_PAGING_RING_PFN]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_ACCESS_RING_PFN,
+                     &hvm_params[HVM_PARAM_ACCESS_RING_PFN]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_VM86_TSS,
+                     &hvm_params[HVM_PARAM_VM86_TSS]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_ACPI_IOPORTS_LOCATION,
+                     &hvm_params[HVM_PARAM_ACPI_IOPORTS_LOCATION]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_VIRIDIAN,
+                     &hvm_params[HVM_PARAM_VIRIDIAN]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_PAE_ENABLED,
+                     &hvm_params[HVM_PARAM_PAE_ENABLED]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_STORE_EVTCHN,
+                     &hvm_params[HVM_PARAM_STORE_EVTCHN]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_IOREQ_SERVER_PFN,
+                     &hvm_params[HVM_PARAM_IOREQ_SERVER_PFN]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_NR_IOREQ_SERVER_PAGES,
+                     &hvm_params[HVM_PARAM_NR_IOREQ_SERVER_PAGES]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_VM_GENERATION_ID_ADDR,
+                     &hvm_params[HVM_PARAM_VM_GENERATION_ID_ADDR]);
+
+    rc = xc_domain_devour(xch, source_dom, dest_dom);
+    if ( rc != 0 )
+    {
+        PERROR("failed to devour original domain, rc=%d\n", rc);
+        goto out;
+    }
+
+    while ( 1 )
+    {
+        sleep(SLEEP_INT);
+        if ( xc_get_tot_pages(xch, source_dom) <= 0 )
+        {
+            DPRINTF("All pages were transferred");
+            break;
+        }
+    }
+
+
+    if ( sharedinfo_pfn == XEN_DOMCTL_PFINFO_XTAB )
+    {
+        /*
+         * The shared info frame is dropped from the physmap when the guest
+         * maps shared info, so its type is likely XEN_DOMCTL_PFINFO_XTAB;
+         * in that case we need to put an empty page back in its place.
+         */
+
+        if ( xc_domain_populate_physmap_exact(xch, dest_dom, 1, 0, 0,
+                                              &old_info.shared_info_frame) )
+        {
+            PERROR("failed to populate pfn %lx (shared info)", 
old_info.shared_info_frame);
+            goto out;
+        }
+    }
+
+    if ( xc_domain_hvm_setcontext(xch, dest_dom, hvm_buf,
+                                  hvm_buf_size) == -1 )
+    {
+        PERROR("HVM:Could not set hvm buffer");
+        goto out;
+    }
+
+    if ( store_pfn )
+        xc_clear_domain_page(xch, dest_dom, store_pfn);
+
+    if ( console_pfn )
+        xc_clear_domain_page(xch, dest_dom, console_pfn);
+
+    if ( buffio_pfn )
+        xc_clear_domain_page(xch, dest_dom, buffio_pfn);
+
+    if ( io_pfn )
+        xc_clear_domain_page(xch, dest_dom, io_pfn);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_STORE_PFN,
+                     hvm_params[HVM_PARAM_STORE_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom,
+                     HVM_PARAM_CONSOLE_PFN,
+                     hvm_params[HVM_PARAM_CONSOLE_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_BUFIOREQ_PFN,
+                     hvm_params[HVM_PARAM_BUFIOREQ_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_IOREQ_PFN,
+                     hvm_params[HVM_PARAM_IOREQ_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_IDENT_PT,
+                     hvm_params[HVM_PARAM_IDENT_PT]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_PAGING_RING_PFN,
+                     hvm_params[HVM_PARAM_PAGING_RING_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_ACCESS_RING_PFN,
+                     hvm_params[HVM_PARAM_ACCESS_RING_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_VM86_TSS,
+                     hvm_params[HVM_PARAM_VM86_TSS]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_ACPI_IOPORTS_LOCATION,
+                     hvm_params[HVM_PARAM_ACPI_IOPORTS_LOCATION]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_VIRIDIAN,
+                     hvm_params[HVM_PARAM_VIRIDIAN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_PAE_ENABLED,
+                     hvm_params[HVM_PARAM_PAE_ENABLED]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_STORE_EVTCHN,
+                     hvm_params[HVM_PARAM_STORE_EVTCHN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_IOREQ_SERVER_PFN,
+                     hvm_params[HVM_PARAM_IOREQ_SERVER_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_NR_IOREQ_SERVER_PAGES,
+                     hvm_params[HVM_PARAM_NR_IOREQ_SERVER_PAGES]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_VM_GENERATION_ID_ADDR,
+                     hvm_params[HVM_PARAM_VM_GENERATION_ID_ADDR]);
+
+    if (xc_dom_gnttab_hvm_seed(xch, dest_dom, console_pfn, store_pfn,
+                               console_domid, store_domid))
+    {
+        PERROR("error seeding hvm grant table");
+        goto out;
+    }
+
+    rc = 0;
+out:
+    free(hvm_buf);
+
+    if ( (rc != 0) && (dest_dom != 0) ) {
+        PERROR("Failed to perform soft reset, destroying domain %u",
+               dest_dom);
+        xc_domain_destroy(xch, dest_dom);
+    }
+
+    return !!rc;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
-- 
1.9.3

