[Xen-changelog] [xen-unstable] Add auto-start and auto-stop functionality.
# HG changeset patch
# User Ewan Mellor <ewan@xxxxxxxxxxxxx>
# Date 1175010776 -3600
# Node ID ea68ae90fc105bb343261e4499fbae256af61af4
# Parent  104fc282e53c608d06d11ed22311cc23521cfe8b
Add auto-start and auto-stop functionality.

Patch by Alastair Tse <atse@xxxxxxxxxxxxx>.

Signed-off-by: Ewan Mellor <ewan@xxxxxxxxxxxxx>
---
 tools/python/xen/xend/XendConfig.py       |    5 ++++-
 tools/python/xen/xend/XendDomain.py       |   20 ++++++++++++++++++++
 tools/python/xen/xend/server/SrvServer.py |   17 +++++++++++++++--
 3 files changed, 39 insertions(+), 3 deletions(-)

diff -r 104fc282e53c -r ea68ae90fc10 tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py	Tue Mar 27 16:33:26 2007 +0100
+++ b/tools/python/xen/xend/XendConfig.py	Tue Mar 27 16:52:56 2007 +0100
@@ -452,7 +452,10 @@ class XendConfig(dict):
         for key, typ in XENAPI_CFG_TYPES.items():
             val = sxp.child_value(sxp_cfg, key)
             if val is not None:
-                cfg[key] = typ(val)
+                try:
+                    cfg[key] = typ(val)
+                except (ValueError, TypeError), e:
+                    log.warn('Unable to convert type value for key: %s' % key)
 
         # Convert deprecated options to current equivalents.
 
diff -r 104fc282e53c -r ea68ae90fc10 tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py	Tue Mar 27 16:33:26 2007 +0100
+++ b/tools/python/xen/xend/XendDomain.py	Tue Mar 27 16:52:56 2007 +0100
@@ -569,6 +569,26 @@ class XendDomain:
         finally:
             self.domains_lock.release()
 
+    def autostart_domains(self):
+        """ Autostart managed domains that are marked as such. """
+
+        need_starting = []
+
+        self.domains_lock.acquire()
+        try:
+            for dom_uuid, dom in self.managed_domains.items():
+                if dom and dom.state == DOM_STATE_HALTED:
+                    on_xend_start = dom.info.get('on_xend_start', 'ignore')
+                    auto_power_on = dom.info.get('auto_power_on', False)
+                    should_start = (on_xend_start == 'start') or auto_power_on
+                    if should_start:
+                        need_starting.append(dom_uuid)
+        finally:
+            self.domains_lock.release()
+
+        for dom_uuid in need_starting:
+            self.domain_start(dom_uuid, False)
+
     def cleanup_domains(self):
         """Clean up domains that are marked as autostop.
         Should be called when Xend goes down. This is currently
diff -r 104fc282e53c -r ea68ae90fc10 tools/python/xen/xend/server/SrvServer.py
--- a/tools/python/xen/xend/server/SrvServer.py	Tue Mar 27 16:33:26 2007 +0100
+++ b/tools/python/xen/xend/server/SrvServer.py	Tue Mar 27 16:52:56 2007 +0100
@@ -52,6 +52,7 @@ from xen.xend import Vifctl
 from xen.xend import Vifctl
 from xen.xend.XendLogging import log
 from xen.xend.XendClient import XEN_API_SOCKET
+from xen.xend.XendDomain import instance as xenddomain
 
 from xen.web.SrvDir import SrvDir
 from SrvRoot import SrvRoot
@@ -72,7 +73,7 @@ class XendServers:
     def add(self, server):
         self.servers.append(server)
 
-    def cleanup(self, signum = 0, frame = None):
+    def cleanup(self, signum = 0, frame = None, reloading = False):
         log.debug("SrvServer.cleanup()")
         self.cleaningUp = True
         for server in self.servers:
@@ -80,12 +81,18 @@ class XendServers:
             try:
                 server.shutdown()
             except:
                 pass
+
+        # clean up domains for those that have on_xend_stop
+        if not reloading:
+            xenddomain().cleanup_domains()
+
         self.running = False
+
     def reloadConfig(self, signum = 0, frame = None):
         log.debug("SrvServer.reloadConfig()")
         self.reloadingConfig = True
-        self.cleanup(signum, frame)
+        self.cleanup(signum, frame, reloading = True)
 
     def start(self, status):
         # Running the network script will spawn another process, which takes
@@ -143,6 +150,12 @@ class XendServers:
             status.write('0')
            status.close()
            status = None
+
+        # Reaching this point means we can auto start domains
+        try:
+            xenddomain().autostart_domains()
+        except Exception, e:
+            log.exception("Failed while autostarting domains")
 
         # loop to keep main thread alive until it receives a SIGTERM
         self.running = True
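
A note on the XendConfig.py hunk above: XENAPI_CFG_TYPES maps each config key to a type constructor (int, str, and so on), so typ(val) can raise on a malformed SXP value. A minimal standalone sketch of the failure modes the new guard absorbs (Python 2 syntax to match the tree; safe_convert is illustrative, not a xend function):

    # Illustrative only: why typ(val) needs the ValueError/TypeError guard.
    def safe_convert(typ, val):
        try:
            return typ(val)
        except (ValueError, TypeError):
            return None    # xend instead logs a warning and skips the key

    print safe_convert(int, '42')      # 42   -- clean conversion
    print safe_convert(int, 'abc')     # None -- ValueError from int()
    print safe_convert(int, ['42'])    # None -- TypeError from int()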
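
The new XendDomain.autostart_domains() keys off two per-domain fields: on_xend_start (an xend config option) and auto_power_on (the roughly equivalent Xen-API VM flag). A hypothetical guest config fragment follows; xm config files are Python syntax, and the exact value sets shown are an assumption based on the xend documentation of the period:

    # Hypothetical guest config fragment.
    name = "example-guest"       # illustrative name
    on_xend_start = "start"      # 'start' makes autostart_domains() boot it;
                                 # the default read above is 'ignore'
    on_xend_stop  = "shutdown"   # acted on by cleanup_domains() at xend exit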
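
On the SrvServer.py side, cleanup() and reloadConfig() are signal-driven entry points, and the new reloading flag exists so that a config reload does not tear down guests that merely asked to be stopped when xend itself stops. A minimal sketch of the wiring, assuming the conventional SIGTERM/SIGHUP registration (the actual registration lives elsewhere in xend and is not part of this diff):

    import signal

    def wire_signals(servers):
        # SIGTERM: real shutdown. cleanup() defaults to reloading=False,
        # so xenddomain().cleanup_domains() runs and stops guests that
        # requested it via on_xend_stop.
        signal.signal(signal.SIGTERM, servers.cleanup)
        # SIGHUP: config reload. reloadConfig() calls cleanup(reloading=True),
        # which skips cleanup_domains() so running guests survive the reload.
        signal.signal(signal.SIGHUP, servers.reloadConfig)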
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog