[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] xend: Use max_node_id rather than nr_nodes where appropriate.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1263752404 0
# Node ID 7a8cee80597eb374409fe2f320e740ef9ef051ce
# Parent  757359a367856bb79b26119292be013a23234c16
xend: Use max_node_id rather than nr_nodes where appropriate.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 tools/python/xen/xend/XendDomainInfo.py |   10 +++++-----
 tools/python/xen/xend/XendNode.py       |    7 ++++---
 2 files changed, 9 insertions(+), 8 deletions(-)

diff -r 757359a36785 -r 7a8cee80597e tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Sun Jan 17 18:07:10 2010 +0000
+++ b/tools/python/xen/xend/XendDomainInfo.py   Sun Jan 17 18:20:04 2010 +0000
@@ -2690,7 +2690,7 @@ class XendDomainInfo:
         else:
             def find_relaxed_node(node_list):
                 import sys
-                nr_nodes = info['nr_nodes']
+                nr_nodes = info['max_node_id']+1
                 if node_list is None:
                     node_list = range(0, nr_nodes)
                 nodeload = [0]
@@ -2722,12 +2722,12 @@ class XendDomainInfo:
                 node_memory_list = info['node_to_memory']
                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
                 candidate_node_list = []
-                for i in range(0, info['nr_nodes']):
+                for i in range(0, info['max_node_id']+1):
                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                         candidate_node_list.append(i)
                 best_node = find_relaxed_node(candidate_node_list)[0]
                 cpumask = info['node_to_cpu'][best_node]
-                best_nodes = find_relaxed_node(filter(lambda x: x != best_node, range(0,info['nr_nodes'])))
+                best_nodes = find_relaxed_node(filter(lambda x: x != best_node, range(0,info['max_node_id']+1)))
                 for node_idx in best_nodes:
                     if len(cpumask) >= self.info['VCPUs_max']:
                         break
@@ -2754,9 +2754,9 @@ class XendDomainInfo:
             while (retries > 0):
                 physinfo = xc.physinfo()
                 free_mem = physinfo['free_memory']
-                nr_nodes = physinfo['nr_nodes']
+                max_node_id = physinfo['max_node_id']
                 node_to_dma32_mem = physinfo['node_to_dma32_mem']
-                if (node > nr_nodes):
+                if (node > max_node_id):
                     return
                 # Extra 2MB above 64GB seems to do the trick.
                 need_mem = 64 * 1024 + 2048 - node_to_dma32_mem[node]
diff -r 757359a36785 -r 7a8cee80597e tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Sun Jan 17 18:07:10 2010 +0000
+++ b/tools/python/xen/xend/XendNode.py Sun Jan 17 18:20:04 2010 +0000
@@ -879,7 +879,7 @@ class XendNode:
         whitespace=''
         try:
             node_to_cpu=pinfo['node_to_cpu']
-            for i in range(0, pinfo['nr_nodes']):
+            for i in range(0, pinfo['max_node_id']+1):
                 str+='%snode%d:%s\n' % (whitespace,
                                         i, 
                                       self.list_to_strrange(node_to_cpu[i]))
@@ -892,7 +892,7 @@ class XendNode:
         whitespace=''
         try:
             node_to_memory=pinfo[key]
-            for i in range(0, pinfo['nr_nodes']):
+            for i in range(0, pinfo['max_node_id']+1):
                 str+='%snode%d:%d\n' % (whitespace,
                                         i,
                                         node_to_memory[i] / 1024)
@@ -927,7 +927,8 @@ class XendNode:
                       'free_memory',
                       'node_to_cpu',
                       'node_to_memory',
-                      'node_to_dma32_mem'
+                      'node_to_dma32_mem',
+                      'max_node_id'
                       ]
 
         return [[k, info[k]] for k in ITEM_ORDER]

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, which monitors our
servers 24x7x365 and backs them with RackSpace's Fanatical Support®.