
[Xen-devel] [PATCH] xen: Avoid calling device suspend/resume callbacks



When suspending/resuming or migrating under Xen, there is little need
to suspend and resume all of the attached devices, since Xen/QEMU is
expected to maintain the hardware state correctly. Drop these calls and
replace them with more specific calls that ensure Xen frontend devices
are properly reconnected.

This change is needed to make NVIDIA vGPU migration work under Xen,
since suspending the vGPU device interferes with the migration working
correctly. It has the added benefit of reducing migration downtime, by
approximately 500 ms with an HVM guest in my environment.

Tested by putting an HVM guest through 1000 migration cycles. I also
tested PV guest migration, though less rigorously.

Signed-off-by: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
---
 drivers/xen/manage.c                       | 24 +++++++---------------
 drivers/xen/xenbus/xenbus_probe_frontend.c | 22 ++++++++++++++++++++
 include/xen/xenbus.h                       |  3 +++
 3 files changed, 32 insertions(+), 17 deletions(-)
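
For context, an illustrative sketch (not part of this patch) of the
driver side: xenbus_suspend_frontends() reaches each frontend driver's
suspend callback via xenbus_dev_suspend(), and xenbus_resume_frontends()
reaches the resume callback via xenbus_frontend_dev_resume(). A
hypothetical frontend wiring up those callbacks might look like the
following; all "demofront" names are made up:

/*
 * Hypothetical frontend driver -- the "demofront" names are illustrative
 * only.  xenbus_suspend_frontends() ends up invoking .suspend on every
 * device on the frontend bus, and xenbus_resume_frontends() ends up
 * invoking .resume, which is where the device reconnects to its backend.
 */
#include <xen/xenbus.h>

static int demofront_suspend(struct xenbus_device *dev)
{
	/* Quiesce the device before the domain is suspended. */
	return 0;
}

static int demofront_resume(struct xenbus_device *dev)
{
	/* Re-establish the connection to the backend after resume. */
	return 0;
}

static struct xenbus_driver demofront_driver = {
	/* .ids, .probe, .otherend_changed, etc. omitted for brevity. */
	.suspend = demofront_suspend,
	.resume = demofront_resume,
};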

diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index cd046684e0d1..53768e0e2560 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -113,21 +113,12 @@ static void do_suspend(void)
                goto out_thaw;
        }
 
-       err = dpm_suspend_start(PMSG_FREEZE);
-       if (err) {
-               pr_err("%s: dpm_suspend_start %d\n", __func__, err);
-               goto out_thaw;
-       }
+       xenbus_suspend_frontends();
 
        printk(KERN_DEBUG "suspending xenstore...\n");
        xs_suspend();
 
-       err = dpm_suspend_end(PMSG_FREEZE);
-       if (err) {
-               pr_err("dpm_suspend_end failed: %d\n", err);
-               si.cancelled = 0;
-               goto out_resume;
-       }
+       suspend_device_irqs();
 
        xen_arch_suspend();
 
@@ -141,7 +132,7 @@ static void do_suspend(void)
 
        raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
 
-       dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
+       resume_device_irqs();
 
        if (err) {
                pr_err("failed to start xen_suspend: %d\n", err);
@@ -150,13 +141,12 @@ static void do_suspend(void)
 
        xen_arch_resume();
 
-out_resume:
-       if (!si.cancelled)
+       if (!si.cancelled) {
                xs_resume();
-       else
+               xenbus_resume_frontends();
+       } else {
                xs_suspend_cancel();
-
-       dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
+       }
 
 out_thaw:
        thaw_processes();
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index a7d90a719cea..8cd836c402e1 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -153,6 +153,28 @@ static struct xen_bus_type xenbus_frontend = {
        },
 };
 
+static int xenbus_suspend_one(struct device *dev, void *data)
+{
+       xenbus_dev_suspend(dev);
+       return 0;
+}
+
+void xenbus_suspend_frontends(void)
+{
+       bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, xenbus_suspend_one);
+}
+
+static int xenbus_resume_one(struct device *dev, void *data)
+{
+       xenbus_frontend_dev_resume(dev);
+       return 0;
+}
+
+void xenbus_resume_frontends(void)
+{
+       bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, xenbus_resume_one);
+}
+
 static void frontend_changed(struct xenbus_watch *watch,
                             const char *path, const char *token)
 {
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 869c816d5f8c..71eeb442c375 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -233,4 +233,7 @@ extern const struct file_operations xen_xenbus_fops;
 extern struct xenstore_domain_interface *xen_store_interface;
 extern int xen_store_evtchn;
 
+void xenbus_suspend_frontends(void);
+void xenbus_resume_frontends(void);
+
 #endif /* _XEN_XENBUS_H */
-- 
2.17.2

