[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] xend: Restore CPU affinity on domain resume.
# HG changeset patch # User Keir Fraser <keir.fraser@xxxxxxxxxx> # Date 1226401587 0 # Node ID 76e90ac5067ef71f60b68ea0515f7f0466be5dca # Parent beade55d67fc2c81adaaa552804e0b66dc25becb xend: Restore CPU affinity on domain resume. Move affinity-setting logic into its own function and call from relevant places. From: Jiri Denemark <jdenemar@xxxxxxxxxx> Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx> --- tools/python/xen/xend/XendDomainInfo.py | 112 +++++++++++++++++--------------- 1 files changed, 60 insertions(+), 52 deletions(-) diff -r beade55d67fc -r 76e90ac5067e tools/python/xen/xend/XendDomainInfo.py --- a/tools/python/xen/xend/XendDomainInfo.py Tue Nov 11 11:03:58 2008 +0000 +++ b/tools/python/xen/xend/XendDomainInfo.py Tue Nov 11 11:06:27 2008 +0000 @@ -479,6 +479,7 @@ class XendDomainInfo: if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED): try: self._constructDomain() + self._setCPUAffinity() self._storeVmDetails() self._createChannels() self._createDevices() @@ -2166,6 +2167,64 @@ class XendDomainInfo: raise XendError(str(exn)) + def _setCPUAffinity(self): + """ Repin domain vcpus if a restricted cpus list is provided + """ + + def has_cpus(): + if self.info['cpus'] is not None: + for c in self.info['cpus']: + if c: + return True + return False + + if has_cpus(): + for v in range(0, self.info['VCPUs_max']): + if self.info['cpus'][v]: + xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v]) + else: + def find_relaxed_node(node_list): + import sys + nr_nodes = info['nr_nodes'] + if node_list is None: + node_list = range(0, nr_nodes) + nodeload = [0] + nodeload = nodeload * nr_nodes + from xen.xend import XendDomain + doms = XendDomain.instance().list('all') + for dom in filter (lambda d: d.domid != self.domid, doms): + cpuinfo = dom.getVCPUInfo() + for vcpu in sxp.children(cpuinfo, 'vcpu'): + if sxp.child_value(vcpu, 'online') == 0: continue + cpumap = list(sxp.child_value(vcpu,'cpumap')) + for i in range(0, nr_nodes): + node_cpumask = 
info['node_to_cpu'][i] + for j in node_cpumask: + if j in cpumap: + nodeload[i] += 1 + break + for i in range(0, nr_nodes): + if len(info['node_to_cpu'][i]) > 0 and i in node_list: + nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) + else: + nodeload[i] = sys.maxint + index = nodeload.index( min(nodeload) ) + return index + + info = xc.physinfo() + if info['nr_nodes'] > 1: + node_memory_list = info['node_to_memory'] + needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024 + candidate_node_list = [] + for i in range(0, info['nr_nodes']): + if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0: + candidate_node_list.append(i) + index = find_relaxed_node(candidate_node_list) + cpumask = info['node_to_cpu'][index] + for v in range(0, self.info['VCPUs_max']): + xc.vcpu_setaffinity(self.domid, v, cpumask) + + def _initDomain(self): log.debug('XendDomainInfo.initDomain: %s %s', self.domid, @@ -2185,58 +2244,7 @@ class XendDomainInfo: # repin domain vcpus if a restricted cpus list is provided # this is done prior to memory allocation to aide in memory # distribution for NUMA systems. 
- def has_cpus(): - if self.info['cpus'] is not None: - for c in self.info['cpus']: - if c: - return True - return False - - if has_cpus(): - for v in range(0, self.info['VCPUs_max']): - if self.info['cpus'][v]: - xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v]) - else: - def find_relaxed_node(node_list): - import sys - nr_nodes = info['nr_nodes'] - if node_list is None: - node_list = range(0, nr_nodes) - nodeload = [0] - nodeload = nodeload * nr_nodes - from xen.xend import XendDomain - doms = XendDomain.instance().list('all') - for dom in filter (lambda d: d.domid != self.domid, doms): - cpuinfo = dom.getVCPUInfo() - for vcpu in sxp.children(cpuinfo, 'vcpu'): - if sxp.child_value(vcpu, 'online') == 0: continue - cpumap = list(sxp.child_value(vcpu,'cpumap')) - for i in range(0, nr_nodes): - node_cpumask = info['node_to_cpu'][i] - for j in node_cpumask: - if j in cpumap: - nodeload[i] += 1 - break - for i in range(0, nr_nodes): - if len(info['node_to_cpu'][i]) > 0 and i in node_list: - nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) - else: - nodeload[i] = sys.maxint - index = nodeload.index( min(nodeload) ) - return index - - info = xc.physinfo() - if info['nr_nodes'] > 1: - node_memory_list = info['node_to_memory'] - needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024 - candidate_node_list = [] - for i in range(0, info['nr_nodes']): - if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0: - candidate_node_list.append(i) - index = find_relaxed_node(candidate_node_list) - cpumask = info['node_to_cpu'][index] - for v in range(0, self.info['VCPUs_max']): - xc.vcpu_setaffinity(self.domid, v, cpumask) + self._setCPUAffinity() # Use architecture- and image-specific calculations to determine # the various headrooms necessary, given the raw configured _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx 
http://lists.xensource.com/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.