
[Xen-devel] [PATCH 3/3] Teach xc_save to use event-channel-based domain suspend if available.



# HG changeset patch
# User Brendan Cully <brendan@xxxxxxxxx>
# Date 1215120748 25200
# Node ID 3e1d3cb03e8bf2b2f44066e6d7ec9729995d1db1
# Parent  ca55cf40cd134b41812c8c4b851d7df51031de07
Teach xc_save to use event-channel-based domain suspend if available.
If the guest provides a suspend event channel through xenstore,
xc_save will use it in preference to the old xenstore-based method.
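(For reference, the xenstore key in question is
/local/domain/<domid>/device/suspend/event-channel, the same path that
suspend_evtchn_init() reads in the patch below.  Purely as an
illustration, a dom0-side probe for that key could look like the sketch
below; it uses only the libxenstore calls the patch itself relies on,
and guest_suspend_port() and its domid argument are made-up names, not
part of this patch.)

    /* Sketch only: ask xenstore whether the guest advertises a suspend
     * event channel.  Returns the port number, or -1 if the key is
     * absent (in which case the old suspend handshake is used). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <xs.h>        /* libxenstore */

    static int guest_suspend_port(int domid)
    {
        struct xs_handle *xs = xs_daemon_open();
        char path[128];
        unsigned int len;
        char *val;
        int port = -1;

        if (!xs)
            return -1;
        snprintf(path, sizeof(path),
                 "/local/domain/%d/device/suspend/event-channel", domid);
        val = xs_read(xs, XBT_NULL, path, &len);
        xs_daemon_close(xs);
        if (val && len)
            port = atoi(val);
        free(val);
        return port;
    }

A negative return would mean the guest has not advertised a port, which
is exactly the case where xc_save falls back to the old handshake.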

Xend is still informed when the domain has suspended, so that it can
perform device migration in parallel with the final round of memory
migration.
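The plumbing for this is unchanged: xc_save still talks to xend over
its stdout/stdin pipe.  With the event-channel path it prints
"suspended" once the handshake completes, while the old path prints
"suspend" and waits for "done"; XendCheckpoint.py (patched below)
reacts to either line by running DEV_MIGRATE_STEP2.  The real handler
is the Python code in this patch; the C sketch below only illustrates
the same pipe protocol, and handle_save_output() and
migrate_devices_step2() are made-up placeholder names.

    /* Illustration only of the xend side of the xc_save pipe protocol. */
    #include <stdio.h>
    #include <string.h>

    static void migrate_devices_step2(void) { /* placeholder */ }

    static void handle_save_output(FILE *from_child, FILE *to_child)
    {
        char line[64];

        while (fgets(line, sizeof(line), from_child)) {
            int is_suspend   = !strcmp(line, "suspend\n");
            int is_suspended = !strcmp(line, "suspended\n");

            /* "suspend": old protocol, xend must suspend the domain
             * itself first (omitted here); "suspended": the guest has
             * already suspended via the event channel.  Either way the
             * next step is the STEP2 device migration. */
            if (is_suspend || is_suspended)
                migrate_devices_step2();

            /* old handshake only: acknowledge so xc_save can proceed */
            if (is_suspend) {
                fputs("done\n", to_child);
                fflush(to_child);
            }
        }
    }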

Signed-off-by: Brendan Cully <brendan@xxxxxxxxx>

diff --git a/tools/python/xen/xend/XendCheckpoint.py b/tools/python/xen/xend/XendCheckpoint.py
--- a/tools/python/xen/xend/XendCheckpoint.py
+++ b/tools/python/xen/xend/XendCheckpoint.py
@@ -108,6 +108,7 @@
                 log.debug("Suspending %d ...", dominfo.getDomid())
                 dominfo.shutdown('suspend')
                 dominfo.waitForShutdown()
+            if line in ('suspend', 'suspended'):
                 dominfo.migrateDevices(network, dst, DEV_MIGRATE_STEP2,
                                        domain_name)
                 log.info("Domain %d suspended.", dominfo.getDomid())
@@ -116,6 +117,7 @@
                 if hvm:
                     dominfo.image.saveDeviceModel()
 
+            if line == "suspend":
                 tochild.write("done\n")
                 tochild.flush()
                 log.debug('Written done')
diff --git a/tools/xcutils/xc_save.c b/tools/xcutils/xc_save.c
--- a/tools/xcutils/xc_save.c
+++ b/tools/xcutils/xc_save.c
@@ -23,11 +23,18 @@
 #include <xenctrl.h>
 #include <xenguest.h>
 
+static struct suspendinfo {
+    int xce; /* event channel handle */
+
+    int suspend_evtchn;
+    int suspended_evtchn;
+} si;
+
 /**
  * Issue a suspend request through stdout, and receive the acknowledgement
  * from stdin.  This is handled by XendCheckpoint in the Python layer.
  */
-static int suspend(int domid)
+static int compat_suspend(int domid)
 {
     char ans[30];
 
@@ -36,6 +43,131 @@
 
     return (fgets(ans, sizeof(ans), stdin) != NULL &&
             !strncmp(ans, "done\n", 5));
+}
+
+static int suspend_evtchn_release(int xc, int domid)
+{
+    if (si.suspended_evtchn >= 0) {
+       xc_dom_subscribe(xc, domid, 0);
+       xc_evtchn_unbind(si.xce, si.suspended_evtchn);
+       si.suspended_evtchn = -1;
+    }
+    if (si.suspend_evtchn >= 0) {
+       xc_evtchn_unbind(si.xce, si.suspend_evtchn);
+       si.suspend_evtchn = -1;
+    }
+    if (si.xce >= 0) {
+       xc_evtchn_close(si.xce);
+       si.xce = -1;
+    }
+
+    return 0;
+}
+
+static int suspend_evtchn_init(int xc, int domid)
+{
+    struct xs_handle *xs;
+    char path[128];
+    char *portstr;
+    unsigned int plen;
+    int port;
+    int rc;
+
+    si.xce = -1;
+    si.suspend_evtchn = -1;
+    si.suspended_evtchn = -1;
+
+    xs = xs_daemon_open();
+    if (!xs) {
+       errx(1, "failed to get xenstore handle");
+       return -1;
+    }
+    sprintf(path, "/local/domain/%d/device/suspend/event-channel", domid);
+    portstr = xs_read(xs, XBT_NULL, path, &plen);
+    xs_daemon_close(xs);
+
+    if (!portstr || !plen) {
+       warnx("could not read suspend event channel");
+       return -1;
+    }
+
+    port = atoi(portstr);
+    free(portstr);
+
+    si.xce = xc_evtchn_open();
+    if (si.xce < 0) {
+       errx(1, "failed to open event channel handle");
+       goto cleanup;
+    }
+
+    si.suspend_evtchn = xc_evtchn_bind_interdomain(si.xce, domid, port);
+    if (si.suspend_evtchn < 0) {
+       errx(1, "failed to bind suspend event channel: %d",
+            si.suspend_evtchn);
+       goto cleanup;
+    }
+
+    si.suspended_evtchn = xc_evtchn_bind_unbound_port(si.xce, domid);
+    if (si.suspended_evtchn < 0) {
+       errx(1, "failed to allocate suspend notification port: %d",
+            si.suspended_evtchn);
+       goto cleanup;
+    }
+
+    rc = xc_dom_subscribe(xc, domid, si.suspended_evtchn);
+    if (rc < 0) {
+       errx(1, "failed to subscribe to domain: %d", rc);
+       goto cleanup;
+    }
+
+    return 0;
+
+  cleanup:
+    suspend_evtchn_release(xc, domid);
+
+    return -1;
+}
+
+/**
+ * Issue a suspend request to a dedicated event channel in the guest, and
+ * receive the acknowledgement from the subscribed event channel. */
+static int evtchn_suspend(int domid)
+{
+    int xcefd;
+    int rc;
+
+    rc = xc_evtchn_notify(si.xce, si.suspend_evtchn);
+    if (rc < 0) {
+       errx(1, "failed to notify suspend request channel: %d", rc);
+       return 0;
+    }
+
+    xcefd = xc_evtchn_fd(si.xce);
+    do {
+      rc = xc_evtchn_pending(si.xce);
+      if (rc < 0) {
+       errx(1, "error polling suspend notification channel: %d", rc);
+       return 0;
+      }
+    } while (rc != si.suspended_evtchn);
+
+    /* harmless for one-off suspend */
+    if (xc_evtchn_unmask(si.xce, si.suspended_evtchn) < 0)
+       errx(1, "failed to unmask suspend notification channel: %d", rc);
+
+    /* notify xend that it can do device migration */
+    printf("suspended\n");
+    fflush(stdout);
+
+    return 1;
+}
+
+static int suspend(int domid)
+{
+    if (si.suspend_evtchn >= 0)
+       return evtchn_suspend(domid);
+
+    return compat_suspend(domid);
 }
 
 /* For HVM guests, there are two sources of dirty pages: the Xen shadow
@@ -188,9 +320,13 @@
     max_f = atoi(argv[4]);
     flags = atoi(argv[5]);
 
+    suspend_evtchn_init(xc_fd, domid);
+
     ret = xc_domain_save(xc_fd, io_fd, domid, maxit, max_f, flags, 
                          &suspend, !!(flags & XCFLAGS_HVM),
                          &init_qemu_maps, &qemu_flip_buffer);
+
+    suspend_evtchn_release(xc_fd, domid);
 
     xc_interface_close(xc_fd);
 
