
[Xen-devel] [PATCH RFCv3 6/8] libxc: introduce soft reset for HVM domains



Add a new xc_domain_soft_reset() function which performs a so-called 'soft
reset' of an HVM domain. The reset is performed in the following way:
- Save the HVM context and all HVM params;
- Set a recipient domain (an empty one with the same config) with
  XEN_DOMCTL_set_recipient;
- Destroy the source domain;
- If everything goes well, all memory gets reassigned to the new domain;
- If some pages are left over (meaning something went wrong during the
  destroy), copy the remaining pages by hand;
- Restore the HVM context and HVM params, and seed the grant table.

After that the domain resumes execution from the point where
SHUTDOWN_soft_reset was called.

Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
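A minimal sketch of how a toolstack might invoke the new call (the helper
below is hypothetical and not part of this patch; it assumes xch is an open
hypervisor interface handle, that dest_dom was created beforehand as an empty
domain with the same configuration as source_dom, and that dom0 serves both
the console and xenstore):

    #include <xenctrl.h>
    #include <xenguest.h>

    static int soft_reset_example(xc_interface *xch, uint32_t source_dom,
                                  uint32_t dest_dom)
    {
        unsigned long store_mfn, console_mfn;

        /* Move source_dom's memory, HVM context and params over to dest_dom. */
        if ( xc_domain_soft_reset(xch, source_dom, dest_dom,
                                  0 /* console_domid */, &console_mfn,
                                  0 /* store_domid */, &store_mfn) )
            return -1;

        /*
         * The toolstack would then re-introduce dest_dom to xenstore and
         * reconnect its console using the returned frame numbers.
         */
        return 0;
    }
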
 tools/libxc/Makefile               |   1 +
 tools/libxc/xc_domain_soft_reset.c | 394 +++++++++++++++++++++++++++++++++++++
 tools/libxc/xenguest.h             |  20 ++
 3 files changed, 415 insertions(+)
 create mode 100644 tools/libxc/xc_domain_soft_reset.c

diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index 3b04027..b5d4b60 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -49,6 +49,7 @@ GUEST_SRCS-y += xc_offline_page.c xc_compression.c
 else
 GUEST_SRCS-y += xc_nomigrate.c
 endif
+GUEST_SRCS-y += xc_domain_soft_reset.c
 
 vpath %.c ../../xen/common/libelf
 CFLAGS += -I../../xen/common/libelf
diff --git a/tools/libxc/xc_domain_soft_reset.c b/tools/libxc/xc_domain_soft_reset.c
new file mode 100644
index 0000000..495b7e6
--- /dev/null
+++ b/tools/libxc/xc_domain_soft_reset.c
@@ -0,0 +1,394 @@
+/******************************************************************************
+ * xc_domain_soft_reset.c
+ *
+ * Do soft reset.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include <time.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include "xc_private.h"
+#include "xc_core.h"
+#include "xc_bitops.h"
+#include "xc_dom.h"
+#include "xg_private.h"
+#include "xg_save_restore.h"
+
+#include <xen/hvm/params.h>
+
+#define SLEEP_INT 5
+
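+/*
+ * Fill pfn_buf with the type of every GPFN up to and including max_gpfn,
+ * querying the hypervisor in batches of up to 1024 entries.
+ */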
+static int get_pfn_types(xc_interface *xch, uint32_t dom, uint64_t *pfn_buf,
+                         unsigned long max_gpfn)
+{
+    int size;
+    unsigned long pfn, pfn2;
+    int rc = -1;
+
+    for (pfn = 0; pfn < max_gpfn + 1; pfn += 1024)
+    {
+        if (pfn + 1024 <= max_gpfn + 1)
+            size = 1024;
+        else
+            size = max_gpfn - pfn + 1;
+
+        for (pfn2 = pfn; pfn2 < pfn + size; pfn2++)
+            pfn_buf[pfn2] = pfn2;
+
+        if ( xc_get_pfn_type_batch(xch, dom, size, &pfn_buf[pfn]) )
+        {
+            PERROR("xc_get_pfn_type_batch failed");
+            goto out;
+        }
+    }
+    rc = 0;
+out:
+    return rc;
+}
+
+int xc_domain_soft_reset(xc_interface *xch, uint32_t source_dom,
+                         uint32_t dest_dom, domid_t console_domid,
+                         unsigned long *console_mfn, domid_t store_domid,
+                         unsigned long *store_mfn)
+{
+    xc_dominfo_t old_info, new_info;
+    int rc = 1;
+
+    uint32_t hvm_buf_size = 0;
+    uint8_t *hvm_buf = NULL;
+    unsigned long console_pfn, store_pfn, io_pfn, buffio_pfn;
+    unsigned long pfn, max_gpfn, num_pages, num_pages_new;
+    uint64_t hvm_params[HVM_NR_PARAMS];
+    uint64_t *pfn_buf_old = NULL, *pfn_buf_new = NULL;
+    void *source_pg, *dest_pg;
+
+    DPRINTF("%s: soft reset domid %u -> %u", __func__, source_dom, dest_dom);
+
+    if ( xc_domain_getinfo(xch, source_dom, 1, &old_info) != 1 )
+    {
+        PERROR("Could not get old domain info");
+        return 1;
+    }
+
+    if ( xc_domain_getinfo(xch, dest_dom, 1, &new_info) != 1 )
+    {
+        PERROR("Could not get new domain info");
+        return 1;
+    }
+
+    if ( !old_info.hvm || !new_info.hvm )
+    {
+        PERROR("Soft reset is supported for HVM only");
+        return 1;
+    }
+
+    max_gpfn = xc_domain_maximum_gpfn(xch, source_dom);
+    pfn_buf_old = (uint64_t *)malloc((max_gpfn + 1) * sizeof(uint64_t));
+
+    if ( !pfn_buf_old  )
+    {
+        ERROR("Couldn't allocate memory");
+        goto out;
+    }
+
+    if ( get_pfn_types(xch, source_dom, pfn_buf_old, max_gpfn) )
+    {
+        goto out;
+    }
+
+    hvm_buf_size = xc_domain_hvm_getcontext(xch, source_dom, 0, 0);
+    if ( hvm_buf_size == -1 )
+    {
+        PERROR("Couldn't get HVM context size from Xen");
+        goto out;
+    }
+
+    hvm_buf = malloc(hvm_buf_size);
+    if ( !hvm_buf )
+    {
+        ERROR("Couldn't allocate memory");
+        goto out;
+    }
+
+    if ( xc_domain_hvm_getcontext(xch, source_dom, hvm_buf,
+                                  hvm_buf_size) == -1 )
+    {
+        PERROR("HVM:Could not get hvm buffer");
+        goto out;
+    }
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_STORE_PFN,
+                     &hvm_params[HVM_PARAM_STORE_PFN]);
+    store_pfn = hvm_params[HVM_PARAM_STORE_PFN];
+    *store_mfn = store_pfn;
+
+    xc_hvm_param_get(xch, source_dom,
+                     HVM_PARAM_CONSOLE_PFN,
+                     &hvm_params[HVM_PARAM_CONSOLE_PFN]);
+    console_pfn = hvm_params[HVM_PARAM_CONSOLE_PFN];
+    *console_mfn = console_pfn;
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_BUFIOREQ_PFN,
+                     &hvm_params[HVM_PARAM_BUFIOREQ_PFN]);
+    buffio_pfn = hvm_params[HVM_PARAM_BUFIOREQ_PFN];
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_IOREQ_PFN,
+                     &hvm_params[HVM_PARAM_IOREQ_PFN]);
+    io_pfn = hvm_params[HVM_PARAM_IOREQ_PFN];
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_IDENT_PT,
+                     &hvm_params[HVM_PARAM_IDENT_PT]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_PAGING_RING_PFN,
+                     &hvm_params[HVM_PARAM_PAGING_RING_PFN]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_ACCESS_RING_PFN,
+                     &hvm_params[HVM_PARAM_ACCESS_RING_PFN]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_VM86_TSS,
+                     &hvm_params[HVM_PARAM_VM86_TSS]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_ACPI_IOPORTS_LOCATION,
+                     &hvm_params[HVM_PARAM_ACPI_IOPORTS_LOCATION]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_VIRIDIAN,
+                     &hvm_params[HVM_PARAM_VIRIDIAN]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_PAE_ENABLED,
+                     &hvm_params[HVM_PARAM_PAE_ENABLED]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_STORE_EVTCHN,
+                     &hvm_params[HVM_PARAM_STORE_EVTCHN]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_IOREQ_SERVER_PFN,
+                     &hvm_params[HVM_PARAM_IOREQ_SERVER_PFN]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_NR_IOREQ_SERVER_PAGES,
+                     &hvm_params[HVM_PARAM_NR_IOREQ_SERVER_PAGES]);
+
+    xc_hvm_param_get(xch, source_dom, HVM_PARAM_VM_GENERATION_ID_ADDR,
+                     &hvm_params[HVM_PARAM_VM_GENERATION_ID_ADDR]);
+
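+    /*
+     * Make the destination domain the recipient of the source domain's
+     * memory and destroy the source: pages relinquished during the destroy
+     * are reassigned to the destination instead of being freed.
+     */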
+    rc = xc_domain_set_recipient(xch, source_dom, dest_dom);
+    if ( rc != 0 )
+    {
+        PERROR("failed to set recipient, rc=%d\n", rc);
+        goto out;
+    }
+
+    rc = xc_domain_destroy(xch, source_dom);
+    if ( rc != 0 )
+    {
+        PERROR("failed to desrtoy source domain, rc=%d\n", rc);
+        goto out;
+    }
+
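+    /*
+     * Wait for the reassignment to settle: poll the destination's total
+     * page count until it stops growing.
+     */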
+    num_pages = 0;
+    while ( 1 )
+    {
+        sleep(SLEEP_INT);
+        num_pages_new = xc_get_tot_pages(xch, dest_dom);
+        if ( num_pages_new == num_pages)
+        {
+            DPRINTF("Stopped at %lu pages, %lu pages left to copy", num_pages,
+                    old_info.nr_pages - num_pages);
+            break;
+        }
+        num_pages = num_pages_new;
+    }
+
+    if ( num_pages == old_info.nr_pages )
+    {
+        DPRINTF("All pages were transferred");
+    }
+    else
+    {
+        pfn_buf_new = (uint64_t *)malloc((max_gpfn + 1) * sizeof(uint64_t));
+
+        if ( !pfn_buf_new  )
+        {
+            ERROR("Couldn't allocate memory");
+            goto out;
+        }
+
+        /* Stop sending pages to the destination domain */
+        rc = xc_domain_set_recipient(xch, source_dom, DOMID_INVALID);
+        if ( rc != 0 )
+        {
+            /* Don't fail here -- source domain may have just died */
+            PERROR("failed to set recipient, rc=%d\n", rc);
+        }
+
+        if ( get_pfn_types(xch, dest_dom, pfn_buf_new, max_gpfn) )
+        {
+            goto out;
+        }
+
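+        /*
+         * Not every page made it across; populate and copy by hand each
+         * GPFN that is present in the source but missing in the destination.
+         */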
+        for (pfn = 0; pfn < max_gpfn + 1; pfn++)
+        {
+            if ( (pfn_buf_new[pfn] == XEN_DOMCTL_PFINFO_XTAB) &&
+                 (pfn_buf_old[pfn] != XEN_DOMCTL_PFINFO_XTAB) )
+            {
+                if ( xc_domain_populate_physmap_exact(xch, dest_dom,
+                                                      1, 0, 0, &pfn) )
+                {
+                    PERROR("failed to populate pfn %lx", pfn);
+                    continue;
+                }
+
+                source_pg = xc_map_foreign_pages(xch, source_dom,
+                                                 PROT_READ, &pfn, 1);
+                if ( !source_pg )
+                {
+                    PERROR("failed to map source page %lx", pfn);
+                    continue;
+                }
+
+                dest_pg = xc_map_foreign_pages(xch, dest_dom,
+                                               PROT_READ|PROT_WRITE, &pfn, 1);
+                if ( !dest_pg )
+                {
+                    munmap(source_pg, PAGE_SIZE);
+                    PERROR("failed to map dest page %lx", pfn);
+                    continue;
+                }
+                memcpy(dest_pg, source_pg, PAGE_SIZE);
+                munmap(source_pg, PAGE_SIZE);
+                munmap(dest_pg, PAGE_SIZE);
+            }
+        }
+        free(pfn_buf_new);
+        pfn_buf_new = NULL;
+    }
+
+    if ( pfn_buf_old[old_info.shared_info_frame] == XEN_DOMCTL_PFINFO_XTAB )
+    {
+        /*
+         * The shared info frame is removed from the physmap when the guest
+         * maps shared info, so this page is likely to be
+         * XEN_DOMCTL_PFINFO_XTAB; in that case replace it with an empty page.
+         */
+
+        if ( xc_domain_populate_physmap_exact(xch, dest_dom, 1, 0, 0,
+                                              &old_info.shared_info_frame) )
+        {
+            PERROR("failed to populate pfn %lx (shared info)",
+                   old_info.shared_info_frame);
+            goto out;
+        }
+    }
+
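+    /*
+     * Restore the saved HVM context in the destination domain, clear the
+     * xenstore, console and ioreq pages, and restore the saved HVM params.
+     */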
+    if ( xc_domain_hvm_setcontext(xch, dest_dom, hvm_buf,
+                                  hvm_buf_size) == -1 )
+    {
+        PERROR("HVM:Could not set hvm buffer");
+        goto out;
+    }
+
+    if ( store_pfn )
+        xc_clear_domain_page(xch, dest_dom, store_pfn);
+
+    if ( console_pfn )
+        xc_clear_domain_page(xch, dest_dom, console_pfn);
+
+    if ( buffio_pfn )
+        xc_clear_domain_page(xch, dest_dom, buffio_pfn);
+
+    if ( io_pfn )
+        xc_clear_domain_page(xch, dest_dom, io_pfn);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_STORE_PFN,
+                     hvm_params[HVM_PARAM_STORE_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom,
+                     HVM_PARAM_CONSOLE_PFN,
+                     hvm_params[HVM_PARAM_CONSOLE_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_BUFIOREQ_PFN,
+                     hvm_params[HVM_PARAM_BUFIOREQ_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_IOREQ_PFN,
+                     hvm_params[HVM_PARAM_IOREQ_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_IDENT_PT,
+                     hvm_params[HVM_PARAM_IDENT_PT]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_PAGING_RING_PFN,
+                     hvm_params[HVM_PARAM_PAGING_RING_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_ACCESS_RING_PFN,
+                     hvm_params[HVM_PARAM_ACCESS_RING_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_VM86_TSS,
+                     hvm_params[HVM_PARAM_VM86_TSS]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_ACPI_IOPORTS_LOCATION,
+                     hvm_params[HVM_PARAM_ACPI_IOPORTS_LOCATION]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_VIRIDIAN,
+                     hvm_params[HVM_PARAM_VIRIDIAN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_PAE_ENABLED,
+                     hvm_params[HVM_PARAM_PAE_ENABLED]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_STORE_EVTCHN,
+                     hvm_params[HVM_PARAM_STORE_EVTCHN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_IOREQ_SERVER_PFN,
+                     hvm_params[HVM_PARAM_IOREQ_SERVER_PFN]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_NR_IOREQ_SERVER_PAGES,
+                     hvm_params[HVM_PARAM_NR_IOREQ_SERVER_PAGES]);
+
+    xc_hvm_param_set(xch, dest_dom, HVM_PARAM_VM_GENERATION_ID_ADDR,
+                     hvm_params[HVM_PARAM_VM_GENERATION_ID_ADDR]);
+
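+    /*
+     * Seed the destination's grant table with the console and xenstore
+     * entries.
+     */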
+    if (xc_dom_gnttab_hvm_seed(xch, dest_dom, console_pfn, store_pfn,
+                               console_domid, store_domid))
+    {
+        PERROR("error seeding hvm grant table");
+        goto out;
+    }
+
+    rc = 0;
+out:
+    free(pfn_buf_old);
+    free(pfn_buf_new);
+    free(hvm_buf);
+
+    if ( (rc != 0) && (dest_dom != 0) )
+    {
+        PERROR("Failed to perform soft reset, destroying domain %u",
+               dest_dom);
+        xc_domain_destroy(xch, dest_dom);
+    }
+
+    return !!rc;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libxc/xenguest.h b/tools/libxc/xenguest.h
index 40bbac8..770cd10 100644
--- a/tools/libxc/xenguest.h
+++ b/tools/libxc/xenguest.h
@@ -131,6 +131,26 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
  * of the new domain is automatically appended to the filename,
  * separated by a ".".
  */
+
+/**
+ * This function performs a soft reset of a domain. During soft reset all
+ * of the source domain's memory is reassigned to the destination domain,
+ * and the HVM context and HVM params are copied over.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm source_dom the id of the source domain
+ * @parm dest_dom the id of the destination domain
+ * @parm console_domid the id of the domain handling console
+ * @parm console_mfn returned with the mfn of the console page
+ * @parm store_domid the id of the domain handling store
+ * @parm store_mfn returned with the mfn of the store page
+ * @return 0 on success, non-zero on failure
+ */
+int xc_domain_soft_reset(xc_interface *xch, uint32_t source_dom,
+                         uint32_t dest_dom, domid_t console_domid,
+                         unsigned long *console_mfn, domid_t store_domid,
+                         unsigned long *store_mfn);
+
 #define XC_DEVICE_MODEL_RESTORE_FILE "/var/lib/xen/qemu-resume"
 
 /**
-- 
1.9.3

