[Xen-changelog] [xen-unstable] xend: Remove broken bits of NUMA code.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1275371723 -3600
# Node ID f41f1ae36b66ab9db88aa35ab3b161af34196ba1
# Parent  144603cc8cb6d42cf6f38bfb0bf9ca17d96c1380
xend: Remove broken bits of NUMA code.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 tools/python/xen/xend/XendCheckpoint.py |   15 ---------------
 tools/python/xen/xend/balloon.py        |   29 -----------------------------
 2 files changed, 44 deletions(-)

diff -r 144603cc8cb6 -r f41f1ae36b66 tools/python/xen/xend/XendCheckpoint.py
--- a/tools/python/xen/xend/XendCheckpoint.py   Tue Jun 01 06:45:44 2010 +0100
+++ b/tools/python/xen/xend/XendCheckpoint.py   Tue Jun 01 06:55:23 2010 +0100
@@ -239,21 +239,6 @@ def restore(xd, fd, dominfo = None, paus
         dominfo.destroy()
         raise XendError("cannot restore non-migratable domain")
 
-    # repin domain vcpus if a target node number was specified 
-    # this is done prior to memory allocation to aide in memory
-    # distribution for NUMA systems.
-    nodenr = -1
-    for i,l in enumerate(vmconfig):
-        if type(l) == type([]):
-            if l[0] == 'node':
-                nodenr = int(l[1])
-
-    if nodenr >= 0:
-        node_to_cpu = XendNode.instance().xc.physinfo()['node_to_cpu']
-        if nodenr < len(node_to_cpu):
-            for v in range(0, dominfo.info['VCPUs_max']):
-                 xc.vcpu_setaffinity(dominfo.domid, v, node_to_cpu[nodenr])
-
     store_port   = dominfo.getStorePort()
     console_port = dominfo.getConsolePort()
 
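The hunk above removes restore-time NUMA placement: xend scanned the
saved vmconfig SXP for a (node N) element and, if one was present,
pinned every vCPU of the restored domain to that node's CPUs before
allocating its memory. The pinning went through
physinfo()['node_to_cpu'], a key that was being phased out around this
time in favour of the separate numainfo interface, which is presumably
why the block is dropped as broken rather than fixed.

For illustration only, a sketch of what a working version might look
like against the numainfo interface (the repin_to_node helper is
invented here; the 'cpu_to_node' key is borrowed from the balloon.py
hunk below, and none of this is part of the changeset):

    # Hypothetical sketch, not xend's actual code: look for a requested
    # NUMA node in the saved vmconfig SXP and pin all vCPUs to the CPUs
    # of that node before restore-time memory allocation.
    def repin_to_node(xc, vmconfig, domid, vcpus_max):
        nodenr = -1
        for item in vmconfig:
            # isinstance() is the idiomatic spelling of the removed
            # type(l) == type([]) test.
            if isinstance(item, list) and item and item[0] == 'node':
                nodenr = int(item[1])
        if nodenr < 0:
            return                    # no node requested
        cpu_to_node = xc.numainfo()['cpu_to_node']   # assumed key
        cpus = [c for c, n in enumerate(cpu_to_node) if n == nodenr]
        if not cpus:
            return                    # unknown node: keep defaults
        for v in range(vcpus_max):
            # vcpu_setaffinity() takes a list of physical CPU numbers,
            # matching the call in the removed code.
            xc.vcpu_setaffinity(domid, v, cpus)
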
diff -r 144603cc8cb6 -r f41f1ae36b66 tools/python/xen/xend/balloon.py
--- a/tools/python/xen/xend/balloon.py  Tue Jun 01 06:45:44 2010 +0100
+++ b/tools/python/xen/xend/balloon.py  Tue Jun 01 06:55:23 2010 +0100
@@ -174,35 +174,6 @@ def free(need_mem, dominfo):
             free_mem = physinfo['free_memory']
             scrub_mem = physinfo['scrub_memory']
 
-        # Check whethercurrent machine is a numa system and the new 
-        # created hvm has all its vcpus in the same node, if all the 
-        # conditions above are fit. We will wait until all the pages 
-        # in scrub list are freed (if waiting time go beyond 20s, 
-        # we will stop waiting it.)
-        if physinfo['nr_nodes'] > 1 and retries == 0:
-            oldnode = -1
-            waitscrub = 1
-            vcpus = dominfo.info['cpus'][0]
-            for vcpu in vcpus:
-                nodenum = xc.numainfo()['cpu_to_node'][cpu]
-                if oldnode == -1:
-                    oldnode = nodenum
-                elif oldnode != nodenum:
-                    waitscrub = 0
-
-            if waitscrub == 1 and scrub_mem > 0:
-                log.debug("wait for scrub %s", scrub_mem)
-                while scrub_mem > 0 and retries < rlimit:
-                    time.sleep(sleep_time)
-                    physinfo = xc.physinfo()
-                    free_mem = physinfo['free_memory']
-                    scrub_mem = physinfo['scrub_memory']
-                    retries += 1
-                    sleep_time += SLEEP_TIME_GROWTH
-                log.debug("scrub for %d times", retries)
-
-            retries = 0
-            sleep_time = SLEEP_TIME_GROWTH
         while retries < rlimit:
             physinfo = xc.physinfo()
             free_mem = physinfo['free_memory']

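The balloon.py hunk removes a scrub-wait heuristic: on hosts with more
than one NUMA node, if every CPU in the new domain's vCPU affinity mask
sat on the same node, xend would poll until the scrub list drained
(giving up after rlimit retries, roughly the 20s the removed comment
mentions) before falling back to normal ballooning. As written the
heuristic could never have worked: the loop iterates "for vcpu in
vcpus" but indexes cpu_to_node with the name "cpu", which is never
assigned in the loop, so the first iteration raises a NameError; it
also re-fetches xc.numainfo() on every iteration instead of once.

A sketch of the predicate the loop apparently intended (all_on_one_node
is an invented name; only the xc.numainfo()['cpu_to_node'] lookup and
dominfo.info['cpus'][0] come from the removed code):

    # Sketch only: report whether every CPU in the affinity mask of the
    # domain's vCPU 0 belongs to a single NUMA node.
    def all_on_one_node(xc, dominfo):
        cpu_to_node = xc.numainfo()['cpu_to_node']  # fetch the map once
        cpus = dominfo.info['cpus'][0]              # vCPU 0 affinity mask
        # Index with the loop variable itself, not the undefined 'cpu'
        # of the removed code.
        return len(set(cpu_to_node[c] for c in cpus)) <= 1

Rather than repair the predicate, the changeset deletes the heuristic
outright and leaves memory freeing to the generic retry loop that
follows the hunk.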