
[Xen-changelog] [xen-unstable] [HVM][LINUX][TOOLS] Split control/reboot_module into control/feature-reboot



# HG changeset patch
# User Steven Smith <ssmith@xxxxxxxxxxxxx>
# Node ID c3b004aef31721d83f428fc58ffc75c94685bf24
# Parent  eb3fe0620e3d5e17f46b5808bcf8efd5f475a2e3
[HVM][LINUX][TOOLS] Split control/reboot_module into control/feature-reboot
and control/feature-sysrq.  This is more consistent with the other
PV device protocols.
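
As an aside for toolstack authors (an illustrative sketch, not code from
this changeset): a guest now advertises each control-channel feature by
writing "1" to the corresponding node, so support can be probed before a
graceful reboot is attempted.  Assuming the xen.lowlevel.xs Python
bindings and a made-up domain id:

    from xen.lowlevel.xs import xs

    xsh = xs()                     # connect to the xenstore daemon
    domid = 5                      # hypothetical guest domain id
    # "0" is the null transaction; a guest's control nodes live under
    # its /local/domain/<domid>/ home directory.
    path = '/local/domain/%d/control/feature-reboot' % domid
    try:
        supported = (xsh.read('0', path) == '1')
    except Exception:
        supported = False          # node absent: feature not advertised
    print 'graceful reboot supported:', supported

The equivalent check from a dom0 shell would be
"xenstore-read /local/domain/<domid>/control/feature-reboot".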

Signed-off-by: Steven Smith <sos22@xxxxxxxxx>
---
 linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c |    4 -
 linux-2.6-xen-sparse/drivers/xen/core/reboot.c         |    8 +--
 tools/python/xen/xend/image.py                         |   42 ++++++++---------
 3 files changed, 25 insertions(+), 29 deletions(-)

diff -r eb3fe0620e3d -r c3b004aef317 linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c    Tue Oct 31 09:54:49 2006 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c    Tue Oct 31 10:11:53 2006 +0000
@@ -59,10 +59,6 @@ EXPORT_SYMBOL(machine_halt);
 EXPORT_SYMBOL(machine_halt);
 EXPORT_SYMBOL(machine_power_off);
 
-#endif /* defined(__i386__) || defined(__x86_64__) */
-
-#if defined(__i386__) || defined(__x86_64__)
-
 /* Ensure we run on the idle task page tables so that we will
    switch page tables before running user space. This is needed
    on architectures with separate kernel and user page tables
diff -r eb3fe0620e3d -r c3b004aef317 linux-2.6-xen-sparse/drivers/xen/core/reboot.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/reboot.c    Tue Oct 31 09:54:49 2006 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/core/reboot.c    Tue Oct 31 10:11:53 2006 +0000
@@ -164,10 +164,14 @@ static int setup_shutdown_watcher(struct
        err = register_xenbus_watch(&shutdown_watch);
        if (err)
                printk(KERN_ERR "Failed to set shutdown watcher\n");
+       else
+               xenbus_write(XBT_NIL, "control", "feature-reboot", "1");
 
        err = register_xenbus_watch(&sysrq_watch);
        if (err)
                printk(KERN_ERR "Failed to set sysrq watcher\n");
+       else
+               xenbus_write(XBT_NIL, "control", "feature-sysrq", "1");
 
        return NOTIFY_DONE;
 }
@@ -179,10 +183,6 @@ static int __init setup_shutdown_event(v
        };
        register_xenstore_notifier(&xenstore_notifier);
 
-       if (!is_initial_xendomain()) {
-               xenbus_write(XBT_NIL, "control", "reboot_module", "installed");
-       }
-
        return 0;
 }
 
diff -r eb3fe0620e3d -r c3b004aef317 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Tue Oct 31 09:54:49 2006 +0000
+++ b/tools/python/xen/xend/image.py    Tue Oct 31 10:11:53 2006 +0000
@@ -282,7 +282,7 @@ class HVMImageHandler(ImageHandler):
         log.debug("apic           = %d", self.apic)
 
         self.register_shutdown_watch()
-        self.register_reboot_module_watch()
+        self.register_reboot_feature_watch()
 
         return xc.hvm_build(dom            = self.vm.getDomid(),
                             image          = self.kernel,
@@ -417,7 +417,7 @@ class HVMImageHandler(ImageHandler):
 
     def destroy(self):
         self.unregister_shutdown_watch();
-        self.unregister_reboot_module_watch();
+        self.unregister_reboot_feature_watch();
         if not self.pid:
             return
         os.kill(self.pid, signal.SIGKILL)
@@ -460,34 +460,34 @@ class HVMImageHandler(ImageHandler):
 
         return 1 # Keep watching
 
-    def register_reboot_module_watch(self):
-        """ add xen store watch on control/reboot_module """
-        self.rebootModuleWatch = xswatch(self.vm.dompath + "/control/reboot_module", \
-                                    self.hvm_reboot_module)
-        log.debug("hvm reboot module watch registered")
-
-    def unregister_reboot_module_watch(self):
-        """Remove the watch on the control/reboot_module, if any. Nothrow
+    def register_reboot_feature_watch(self):
+        """ add xen store watch on control/feature-reboot """
+        self.rebootFeatureWatch = xswatch(self.vm.dompath + "/control/feature-reboot", \
+                                          self.hvm_reboot_feature)
+        log.debug("hvm reboot feature watch registered")
+
+    def unregister_reboot_feature_watch(self):
+        """Remove the watch on the control/feature-reboot, if any. Nothrow
         guarantee."""
 
         try:
-            if self.rebootModuleWatch:
-                self.rebootModuleWatch.unwatch()
+            if self.rebootFeatureWatch:
+                self.rebootFeatureWatch.unwatch()
         except:
-            log.exception("Unwatching hvm reboot module watch failed.")
-        self.rebootModuleWatch = None
-        log.debug("hvm reboot module watch unregistered")
-
-    def hvm_reboot_module(self, _):
-        """ watch call back on node control/reboot_module,
+            log.exception("Unwatching hvm reboot feature watch failed.")
+        self.rebootFeatureWatch = None
+        log.debug("hvm reboot feature watch unregistered")
+
+    def hvm_reboot_feature(self, _):
+        """ watch call back on node control/feature-reboot,
             if node changed, this function will be called
         """
         xd = xen.xend.XendDomain.instance()
         vm = xd.domain_lookup( self.vm.getDomid() )
 
-        reboot_module_status = vm.readDom('control/reboot_module')
-        log.debug("hvm_reboot_module fired, module status=%s", 
reboot_module_status)
-        if reboot_module_status == 'installed':
+        status = vm.readDom('control/feature-reboot')
+        log.debug("hvm_reboot_feature fired, module status=%s", status)
+        if status == '1':
             self.unregister_shutdown_watch()
 
         return 1 # Keep watching
