[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] xend: Restore scheduling parameters



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1247736775 -3600
# Node ID e07726c03d31283ae85666b79f5478ae46225684
# Parent  8ce42378828bfa1576cbc601cadf61cb21439bf7
xend: Restore scheduling parameters

Scheduling parameters are reset to default values when a domain is
restored or migrated.

Signed-off-by: Masaki Kanno <kanno.masaki@xxxxxxxxxxxxxx>
---
 tools/python/xen/xend/XendConfig.py     |    5 ++++-
 tools/python/xen/xend/XendDomainInfo.py |   16 ++++++++++------
 2 files changed, 14 insertions(+), 7 deletions(-)

diff -r 8ce42378828b -r e07726c03d31 tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py       Thu Jul 16 10:30:50 2009 +0100
+++ b/tools/python/xen/xend/XendConfig.py       Thu Jul 16 10:32:55 2009 +0100
@@ -1055,7 +1055,10 @@ class XendConfig(dict):
         if domain.getDomid() is not None:
             sxpr.append(['domid', domain.getDomid()])
 
-        if not legacy_only:
+        if legacy_only:
+            sxpr.append(['cpu_weight', int(self['vcpus_params'].get('weight', 256))])
+            sxpr.append(['cpu_cap', int(self['vcpus_params'].get('cap', 0))])
+        else:
             for name, typ in XENAPI_CFG_TYPES.items():
                 if name in self and self[name] not in (None, []):
                     if typ == dict:
diff -r 8ce42378828b -r e07726c03d31 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Thu Jul 16 10:30:50 2009 +0100
+++ b/tools/python/xen/xend/XendDomainInfo.py   Thu Jul 16 10:32:55 2009 +0100
@@ -465,17 +465,11 @@ class XendDomainInfo:
                 XendTask.log_progress(91, 100, self.refreshShutdown)
 
                 xendomains = XendDomain.instance()
-                xennode = XendNode.instance()
 
                 # save running configuration if XendDomains believe domain is
                 # persistent
                 if is_managed:
                     xendomains.managed_config_save(self)
-
-                if xennode.xenschedinfo() == 'credit':
-                    xendomains.domain_sched_credit_set(self.getDomid(),
-                                                       self.getWeight(),
-                                                       self.getCap())
             except:
                 log.exception('VM start failed')
                 self.destroy()
@@ -497,6 +491,7 @@ class XendDomainInfo:
                    # we just ignore it so that the domain can still be restored
                     log.warn("Cannot restore CPU affinity")
 
+                self._setSchedParams()
                 self._storeVmDetails()
                 self._createChannels()
                 self._createDevices()
@@ -2555,6 +2550,12 @@ class XendDomainInfo:
                 for v in range(0, self.info['VCPUs_max']):
                     xc.vcpu_setaffinity(self.domid, v, cpumask)
 
+    def _setSchedParams(self):
+        if XendNode.instance().xenschedinfo() == 'credit':
+            from xen.xend import XendDomain
+            XendDomain.instance().domain_sched_credit_set(self.getDomid(),
+                                                          self.getWeight(),
+                                                          self.getCap())
 
     def _initDomain(self):
         log.debug('XendDomainInfo.initDomain: %s %s',
@@ -2570,6 +2571,9 @@ class XendDomainInfo:
             # this is done prior to memory allocation to aide in memory
             # distribution for NUMA systems.
             self._setCPUAffinity()
+
+            # Set scheduling parameters.
+            self._setSchedParams()
 
             # Use architecture- and image-specific calculations to determine
             # the various headrooms necessary, given the raw configured

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.