
[Xen-changelog] [xen-unstable] [POWERPC] merge with xen-unstable.hg



# HG changeset patch
# User Hollis Blanchard <hollisb@xxxxxxxxxx>
# Node ID e01441c9a607d3364dc092f186e0d925a3d4d8d4
# Parent  18ce855ff594098e218123ec4c2d03e2e23acdd0
# Parent  a39ad4c788509f896461244db04a9cc97c85271d
[POWERPC] merge with xen-unstable.hg
---
 linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c |    8 
 tools/blktap/drivers/block-aio.c                 |   19 -
 tools/blktap/drivers/block-qcow.c                |   19 +
 tools/blktap/drivers/tapdisk.c                   |    1 
 tools/blktap/lib/xs_api.c                        |   23 +
 tools/python/xen/xend/FlatDeviceTree.py          |  323 +++++++++++++++++++++++
 tools/python/xen/xend/XendCheckpoint.py          |    6 
 tools/python/xen/xend/XendDomain.py              |    3 
 tools/python/xen/xend/XendDomainInfo.py          |   63 ++--
 tools/python/xen/xend/arch.py                    |   32 ++
 tools/python/xen/xend/image.py                   |  143 ++++++----
 tools/python/xen/xend/server/XMLRPCServer.py     |    3 
 tools/python/xen/xend/server/blkif.py            |   16 -
 tools/python/xen/xm/migrate.py                   |    3 
 tools/python/xen/xm/shutdown.py                  |   49 ++-
 15 files changed, 592 insertions(+), 119 deletions(-)

diff -r 18ce855ff594 -r e01441c9a607 linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c  Tue Aug 29 17:53:57 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c  Wed Aug 30 14:09:31 2006 -0500
@@ -114,8 +114,8 @@ typedef struct domid_translate {
 } domid_translate_t ;
 
 
-domid_translate_t  translate_domid[MAX_TAP_DEV];
-tap_blkif_t *tapfds[MAX_TAP_DEV];
+static domid_translate_t  translate_domid[MAX_TAP_DEV];
+static tap_blkif_t *tapfds[MAX_TAP_DEV];
 
 static int __init set_blkif_reqs(char *str)
 {
@@ -1118,7 +1118,7 @@ static int do_block_io_op(blkif_t *blkif
                               "ring does not exist!\n");
                        print_dbug = 0; /*We only print this message once*/
                }
-               return 1;
+               return 0;
        }
 
        info = tapfds[blkif->dev_num];
@@ -1127,7 +1127,7 @@ static int do_block_io_op(blkif_t *blkif
                        WPRINTK("Can't get UE info!\n");
                        print_dbug = 0;
                }
-               return 1;
+               return 0;
        }
 
        while (rc != rp) {
diff -r 18ce855ff594 -r e01441c9a607 tools/blktap/drivers/block-aio.c
--- a/tools/blktap/drivers/block-aio.c  Tue Aug 29 17:53:57 2006 -0500
+++ b/tools/blktap/drivers/block-aio.c  Wed Aug 30 14:09:31 2006 -0500
@@ -52,7 +52,7 @@
  */
 #define REQUEST_ASYNC_FD 1
 
-#define MAX_AIO_REQS (MAX_REQUESTS * MAX_SEGMENTS_PER_REQ * 8)
+#define MAX_AIO_REQS (MAX_REQUESTS * MAX_SEGMENTS_PER_REQ)
 
 struct pending_aio {
        td_callback_t cb;
@@ -146,7 +146,7 @@ int tdaio_open (struct td_state *s, cons
        struct tdaio_state *prv = (struct tdaio_state *)s->private;
        s->private = prv;
 
-       DPRINTF("XXX: block-aio open('%s')", name);
+       DPRINTF("block-aio open('%s')", name);
        /* Initialize AIO */
        prv->iocb_free_count = MAX_AIO_REQS;
        prv->iocb_queued     = 0;
@@ -156,9 +156,18 @@ int tdaio_open (struct td_state *s, cons
 
        if (prv->poll_fd < 0) {
                ret = prv->poll_fd;
-               DPRINTF("Couldn't get fd for AIO poll support.  This is "
-                       "probably because your kernel does not have the "
-                       "aio-poll patch applied.\n");
+                if (ret == -EAGAIN) {
+                        DPRINTF("Couldn't setup AIO context.  If you are "
+                                "trying to concurrently use a large number "
+                                "of blktap-based disks, you may need to "
+                                "increase the system-wide aio request limit. "
+                                "(e.g. 'echo 1048576 > /proc/sys/fs/"
+                                "aio-max-nr')\n");
+                } else {
+                        DPRINTF("Couldn't get fd for AIO poll support.  This "
+                                "is probably because your kernel does not "
+                                "have the aio-poll patch applied.\n");
+                }
                goto done;
        }
 
diff -r 18ce855ff594 -r e01441c9a607 tools/blktap/drivers/block-qcow.c
--- a/tools/blktap/drivers/block-qcow.c Tue Aug 29 17:53:57 2006 -0500
+++ b/tools/blktap/drivers/block-qcow.c Wed Aug 30 14:09:31 2006 -0500
@@ -51,7 +51,7 @@
 /******AIO DEFINES******/
 #define REQUEST_ASYNC_FD 1
 #define MAX_QCOW_IDS  0xFFFF
-#define MAX_AIO_REQS (MAX_REQUESTS * MAX_SEGMENTS_PER_REQ * 8)
+#define MAX_AIO_REQS (MAX_REQUESTS * MAX_SEGMENTS_PER_REQ)
 
 struct pending_aio {
         td_callback_t cb;
@@ -176,10 +176,21 @@ static int init_aio_state(struct td_stat
         s->aio_ctx = (io_context_t) REQUEST_ASYNC_FD;   
         s->poll_fd = io_setup(MAX_AIO_REQS, &s->aio_ctx);
 
-        if (s->poll_fd < 0) {
-                DPRINTF("Retrieving Async poll fd failed\n");
+       if (s->poll_fd < 0) {
+                if (s->poll_fd == -EAGAIN) {
+                        DPRINTF("Couldn't setup AIO context.  If you are "
+                                "trying to concurrently use a large number "
+                                "of blktap-based disks, you may need to "
+                                "increase the system-wide aio request limit. "
+                                "(e.g. 'echo 1048576 > /proc/sys/fs/"
+                                "aio-max-nr')\n");
+                } else {
+                        DPRINTF("Couldn't get fd for AIO poll support.  This "
+                                "is probably because your kernel does not "
+                                "have the aio-poll patch applied.\n");
+                }
                goto fail;
-        }
+       }
 
         for (i=0;i<MAX_AIO_REQS;i++)
                 s->iocb_free[i] = &s->iocb_list[i];
diff -r 18ce855ff594 -r e01441c9a607 tools/blktap/drivers/tapdisk.c
--- a/tools/blktap/drivers/tapdisk.c    Tue Aug 29 17:53:57 2006 -0500
+++ b/tools/blktap/drivers/tapdisk.c    Wed Aug 30 14:09:31 2006 -0500
@@ -110,6 +110,7 @@ static void unmap_disk(struct td_state *
        free(s->fd_entry);
        free(s->blkif);
        free(s->ring_info);
+        free(s->private);
        free(s);
 
        return;
diff -r 18ce855ff594 -r e01441c9a607 tools/blktap/lib/xs_api.c
--- a/tools/blktap/lib/xs_api.c Tue Aug 29 17:53:57 2006 -0500
+++ b/tools/blktap/lib/xs_api.c Wed Aug 30 14:09:31 2006 -0500
@@ -204,7 +204,7 @@ int convert_dev_name_to_num(char *name) 
 int convert_dev_name_to_num(char *name) {
        char *p_sd, *p_hd, *p_xvd, *p_plx, *p, *alpha,*ptr;
        int majors[10] = {3,22,33,34,56,57,88,89,90,91};
-       int maj,i;
+       int maj,i,ret = 0;
 
        asprintf(&p_sd,"/dev/sd");
        asprintf(&p_hd,"/dev/hd");
@@ -221,7 +221,7 @@ int convert_dev_name_to_num(char *name) 
                        *ptr++;
                }
                *p++;
-               return BASE_DEV_VAL + (16*i) + atoi(p);
+               ret = BASE_DEV_VAL + (16*i) + atoi(p);
        } else if (strstr(name, p_hd) != NULL) {
                p = name + strlen(p_hd);
                for (i = 0, ptr = alpha; i < strlen(alpha); i++) {
@@ -229,7 +229,7 @@ int convert_dev_name_to_num(char *name) 
                        *ptr++;
                }
                *p++;
-               return (majors[i/2]*256) + atoi(p);
+               ret = (majors[i/2]*256) + atoi(p);
 
        } else if (strstr(name, p_xvd) != NULL) {
                p = name + strlen(p_xvd);
@@ -238,17 +238,24 @@ int convert_dev_name_to_num(char *name) 
                        *ptr++;
                }
                *p++;
-               return (202*256) + (16*i) + atoi(p);
+               ret = (202*256) + (16*i) + atoi(p);
 
        } else if (strstr(name, p_plx) != NULL) {
                p = name + strlen(p_plx);
-               return atoi(p);
+               ret = atoi(p);
 
        } else {
                DPRINTF("Unknown device type, setting to default.\n");
-               return BASE_DEV_VAL;
-       }
-       return 0;
+               ret = BASE_DEV_VAL;
+       }
+
+        free(p_sd);
+        free(p_hd);
+        free(p_xvd);
+        free(p_plx);
+        free(alpha);
+        
+       return ret;
 }
 
 /**
diff -r 18ce855ff594 -r e01441c9a607 tools/python/xen/xend/XendCheckpoint.py
--- a/tools/python/xen/xend/XendCheckpoint.py   Tue Aug 29 17:53:57 2006 -0500
+++ b/tools/python/xen/xend/XendCheckpoint.py   Wed Aug 30 14:09:31 2006 -0500
@@ -161,10 +161,12 @@ def restore(xd, fd):
         if handler.store_mfn is None or handler.console_mfn is None:
             raise XendError('Could not read store/console MFN')
 
+        #Block until src closes connection
+        os.read(fd, 1)
         dominfo.unpause()
-
+        
         dominfo.completeRestore(handler.store_mfn, handler.console_mfn)
-
+        
         return dominfo
     except:
         dominfo.destroy()
diff -r 18ce855ff594 -r e01441c9a607 tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py       Tue Aug 29 17:53:57 2006 -0500
+++ b/tools/python/xen/xend/XendDomain.py       Wed Aug 30 14:09:31 2006 -0500
@@ -431,7 +431,8 @@ class XendDomain:
         sock.send("receive\n")
         sock.recv(80)
         XendCheckpoint.save(sock.fileno(), dominfo, True, live, dst)
-
+        dominfo.testDeviceComplete()
+        sock.close()
 
     def domain_save(self, domid, dst):
         """Start saving a domain to file.
diff -r 18ce855ff594 -r e01441c9a607 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Tue Aug 29 17:53:57 2006 -0500
+++ b/tools/python/xen/xend/XendDomainInfo.py   Wed Aug 30 14:09:31 2006 -0500
@@ -30,7 +30,6 @@ import time
 import time
 import threading
 import os
-import math
 
 import xen.lowlevel.xc
 from xen.util import asserts
@@ -703,6 +702,9 @@ class XendDomainInfo:
                 if security[idx][0] == 'ssidref':
                     to_store['security/ssidref'] = str(security[idx][1])
 
+        if not self.readVm('xend/restart_count'):
+            to_store['xend/restart_count'] = str(0)
+
         log.debug("Storing VM details: %s", to_store)
 
         self.writeVm(to_store)
@@ -823,6 +825,9 @@ class XendDomainInfo:
 
     def setResume(self, state):
         self.info['resume'] = state
+
+    def getRestartCount(self):
+        return self.readVm('xend/restart_count')
 
     def refreshShutdown(self, xeninfo = None):
         # If set at the end of this method, a restart is required, with the
@@ -1280,34 +1285,28 @@ class XendDomainInfo:
                 for v in range(0, self.info['max_vcpu_id']+1):
                     xc.vcpu_setaffinity(self.domid, v, self.info['cpus'])
 
-            # set domain maxmem in KiB
-            xc.domain_setmaxmem(self.domid, self.info['maxmem'] * 1024)
-
-            m = self.image.getDomainMemory(self.info['memory'] * 1024)
+            # set memory limit
+            maxmem = self.image.getRequiredMemory(self.info['maxmem'] * 1024)
+            xc.domain_setmaxmem(self.domid, maxmem)
+
+            mem_kb = self.image.getRequiredMemory(self.info['memory'] * 1024)
 
             # get the domain's shadow memory requirement
-            sm = int(math.ceil(self.image.getDomainShadowMemory(m) / 1024.0))
-            if self.info['shadow_memory'] > sm:
-                sm = self.info['shadow_memory']
+            shadow_kb = self.image.getRequiredShadowMemory(mem_kb)
+            shadow_kb_req = self.info['shadow_memory'] * 1024
+            if shadow_kb_req > shadow_kb:
+                shadow_kb = shadow_kb_req
+            shadow_mb = (shadow_kb + 1023) / 1024
 
             # Make sure there's enough RAM available for the domain
-            balloon.free(m + sm * 1024)
+            balloon.free(mem_kb + shadow_mb * 1024)
 
             # Set up the shadow memory
-            sm = xc.shadow_mem_control(self.domid, mb=sm)
-            self.info['shadow_memory'] = sm
-
-            init_reservation = self.info['memory'] * 1024
-            if os.uname()[4] in ('ia64', 'ppc64'):
-                # Workaround for architectures that don't yet support
-                # ballooning.
-                init_reservation = m
-                # Following line from xiantao.zhang@xxxxxxxxx
-                # Needed for IA64 until supports ballooning -- okay for PPC64?
-                xc.domain_setmaxmem(self.domid, m)
-
-            xc.domain_memory_increase_reservation(self.domid, init_reservation,
-                                                  0, 0)
+            shadow_cur = xc.shadow_mem_control(self.domid, shadow_mb)
+            self.info['shadow_memory'] = shadow_cur
+
+            # initial memory allocation
+            xc.domain_memory_increase_reservation(self.domid, mem_kb, 0, 0)
 
             self.createChannels()
 
@@ -1495,6 +1494,21 @@ class XendDomainInfo:
             if rc != 0:
                 raise XendError("Device of type '%s' refuses migration." % n)
 
+    def testDeviceComplete(self):
+        """ For Block IO migration safety we must ensure that
+        the device has shutdown correctly, i.e. all blocks are
+        flushed to disk
+        """
+        while True:
+            test = 0
+            for i in self.getDeviceController('vbd').deviceIDs():
+                test = 1
+                log.info("Dev %s still active, looping...", i)
+                time.sleep(0.1)
+                
+            if test == 0:
+                break
+
     def migrateDevices(self, network, dst, step, domName=''):
         """Notify the devices about migration
         """
@@ -1615,6 +1629,9 @@ class XendDomainInfo:
             try:
                 new_dom = XendDomain.instance().domain_create(config)
                 new_dom.unpause()
+                rst_cnt = self.readVm('xend/restart_count')
+                rst_cnt = int(rst_cnt) + 1
+                self.writeVm('xend/restart_count', str(rst_cnt))
                 new_dom.removeVm(RESTART_IN_PROGRESS)
             except:
                 if new_dom:
diff -r 18ce855ff594 -r e01441c9a607 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Tue Aug 29 17:53:57 2006 -0500
+++ b/tools/python/xen/xend/image.py    Wed Aug 30 14:09:31 2006 -0500
@@ -27,6 +27,8 @@ from xen.xend.XendLogging import log
 from xen.xend.XendLogging import log
 from xen.xend.server.netif import randomMAC
 from xen.xend.xenstore.xswatch import xswatch
+from xen.xend import arch
+from xen.xend import FlatDeviceTree
 
 
 xc = xen.lowlevel.xc.xc()
@@ -141,19 +143,10 @@ class ImageHandler:
             raise VmError('Building domain failed: ostype=%s dom=%d err=%s'
                           % (self.ostype, self.vm.getDomid(), str(result)))
 
-
-    def getDomainMemory(self, mem_kb):
-        """@return The memory required, in KiB, by the domain to store the
-        given amount, also in KiB."""
-        if os.uname()[4] != 'ia64':
-            # A little extra because auto-ballooning is broken w.r.t. HVM
-            # guests. Also, slack is necessary for live migration since that
-            # uses shadow page tables.
-            if 'hvm' in xc.xeninfo()['xen_caps']:
-                mem_kb += 4*1024;
+    def getRequiredMemory(self, mem_kb):
         return mem_kb
 
-    def getDomainShadowMemory(self, mem_kb):
+    def getRequiredShadowMemory(self, mem_kb):
         """@return The minimum shadow memory required, in KiB, for a domain 
         with mem_kb KiB of RAM."""
         # PV domains don't need any shadow memory
@@ -197,9 +190,39 @@ class LinuxImageHandler(ImageHandler):
                               ramdisk        = self.ramdisk,
                               features       = self.vm.getFeatures())
 
+class PPC_LinuxImageHandler(LinuxImageHandler):
+
+    ostype = "linux"
+
+    def configure(self, imageConfig, deviceConfig):
+        LinuxImageHandler.configure(self, imageConfig, deviceConfig)
+        self.imageConfig = imageConfig
+
+    def buildDomain(self):
+        store_evtchn = self.vm.getStorePort()
+        console_evtchn = self.vm.getConsolePort()
+
+        log.debug("dom            = %d", self.vm.getDomid())
+        log.debug("image          = %s", self.kernel)
+        log.debug("store_evtchn   = %d", store_evtchn)
+        log.debug("console_evtchn = %d", console_evtchn)
+        log.debug("cmdline        = %s", self.cmdline)
+        log.debug("ramdisk        = %s", self.ramdisk)
+        log.debug("vcpus          = %d", self.vm.getVCpuCount())
+        log.debug("features       = %s", self.vm.getFeatures())
+
+        devtree = FlatDeviceTree.build(self)
+
+        return xc.linux_build(dom            = self.vm.getDomid(),
+                              image          = self.kernel,
+                              store_evtchn   = store_evtchn,
+                              console_evtchn = console_evtchn,
+                              cmdline        = self.cmdline,
+                              ramdisk        = self.ramdisk,
+                              features       = self.vm.getFeatures(),
+                              arch_args      = devtree.to_bin())
+
 class HVMImageHandler(ImageHandler):
-
-    ostype = "hvm"
 
     def configure(self, imageConfig, deviceConfig):
         ImageHandler.configure(self, imageConfig, deviceConfig)
@@ -282,7 +305,7 @@ class HVMImageHandler(ImageHandler):
         for (name, info) in deviceConfig:
             if name == 'vbd':
                 uname = sxp.child_value(info, 'uname')
-                if 'file:' in uname:
+                if uname is not None and 'file:' in uname:
                     (_, vbdparam) = string.split(uname, ':', 1)
                     if not os.path.isfile(vbdparam):
                         raise VmError('Disk image does not exist: %s' %
@@ -355,32 +378,6 @@ class HVMImageHandler(ImageHandler):
         os.waitpid(self.pid, 0)
         self.pid = 0
 
-    def getDomainMemory(self, mem_kb):
-        """@see ImageHandler.getDomainMemory"""
-        if os.uname()[4] == 'ia64':
-            page_kb = 16
-            # ROM size for guest firmware, ioreq page and xenstore page
-            extra_pages = 1024 + 2
-        else:
-            page_kb = 4
-            # This was derived emperically:
-            #   2.4 MB overhead per 1024 MB RAM + 8 MB constant
-            #   + 4 to avoid low-memory condition
-            extra_mb = (2.4/1024) * (mem_kb/1024.0) + 12;
-            extra_pages = int( math.ceil( extra_mb*1024 / page_kb ))
-        return mem_kb + extra_pages * page_kb
-
-    def getDomainShadowMemory(self, mem_kb):
-        """@return The minimum shadow memory required, in KiB, for a domain 
-        with mem_kb KiB of RAM."""
-        if os.uname()[4] in ('ia64', 'ppc64'):
-            # Explicit shadow memory is not a concept 
-            return 0
-        else:
-            # 1MB per vcpu plus 4Kib/Mib of RAM.  This is higher than 
-            # the minimum that Xen would allocate if no value were given.
-            return 1024 * self.vm.getVCpuCount() + mem_kb / 256
-
     def register_shutdown_watch(self):
         """ add xen store watch on control/shutdown """
         self.shutdownWatch = xswatch(self.vm.dompath + "/control/shutdown", \
@@ -417,15 +414,51 @@ class HVMImageHandler(ImageHandler):
 
         return 1 # Keep watching
 
-"""Table of image handler classes for virtual machine images.  Indexed by
-image type.
-"""
-imageHandlerClasses = {}
-
-
-for h in LinuxImageHandler, HVMImageHandler:
-    imageHandlerClasses[h.ostype] = h
-
+class IA64_HVM_ImageHandler(HVMImageHandler):
+
+    ostype = "hvm"
+
+    def getRequiredMemory(self, mem_kb):
+        page_kb = 16
+        # ROM size for guest firmware, ioreq page and xenstore page
+        extra_pages = 1024 + 2
+        return mem_kb + extra_pages * page_kb
+
+    def getRequiredShadowMemory(self, mem_kb):
+        # Explicit shadow memory is not a concept 
+        return 0
+
+class X86_HVM_ImageHandler(HVMImageHandler):
+
+    ostype = "hvm"
+
+    def getRequiredMemory(self, mem_kb):
+        page_kb = 4
+        # This was derived empirically:
+        #   2.4 MB overhead per 1024 MB RAM + 8 MB constant
+        #   + 4 to avoid low-memory condition
+        extra_mb = (2.4/1024) * (mem_kb/1024.0) + 12;
+        extra_pages = int( math.ceil( extra_mb*1024 / page_kb ))
+        return mem_kb + extra_pages * page_kb
+
+    def getRequiredShadowMemory(self, mem_kb):
+        # 1MB per vcpu plus 4KiB/MiB of RAM.  This is higher than 
+        # the minimum that Xen would allocate if no value were given.
+        return 1024 * self.vm.getVCpuCount() + mem_kb / 256
+
+_handlers = {
+    "powerpc": {
+        "linux": PPC_LinuxImageHandler,
+    },
+    "ia64": {
+        "linux": LinuxImageHandler,
+        "hvm": IA64_HVM_ImageHandler,
+    },
+    "x86": {
+        "linux": LinuxImageHandler,
+        "hvm": X86_HVM_ImageHandler,
+    },
+}
 
 def findImageHandlerClass(image):
     """Find the image handler class for an image config.
@@ -433,10 +466,10 @@ def findImageHandlerClass(image):
     @param image config
     @return ImageHandler subclass or None
     """
-    ty = sxp.name(image)
-    if ty is None:
+    type = sxp.name(image)
+    if type is None:
         raise VmError('missing image type')
-    imageClass = imageHandlerClasses.get(ty)
-    if imageClass is None:
-        raise VmError('unknown image type: ' + ty)
-    return imageClass
+    try:
+        return _handlers[arch.type][type]
+    except KeyError:
+        raise VmError('unknown image type: ' + type)
diff -r 18ce855ff594 -r e01441c9a607 tools/python/xen/xend/server/XMLRPCServer.py
--- a/tools/python/xen/xend/server/XMLRPCServer.py      Tue Aug 29 17:53:57 2006 -0500
+++ b/tools/python/xen/xend/server/XMLRPCServer.py      Wed Aug 30 14:09:31 2006 -0500
@@ -78,7 +78,8 @@ methods = ['device_create', 'device_conf
 methods = ['device_create', 'device_configure', 'destroyDevice',
            'getDeviceSxprs',
            'setMemoryTarget', 'setName', 'setVCpuCount', 'shutdown',
-           'send_sysrq', 'getVCPUInfo', 'waitForDevices']
+           'send_sysrq', 'getVCPUInfo', 'waitForDevices',
+           'getRestartCount']
 
 exclude = ['domain_create', 'domain_restore']
 
diff -r 18ce855ff594 -r e01441c9a607 tools/python/xen/xend/server/blkif.py
--- a/tools/python/xen/xend/server/blkif.py     Tue Aug 29 17:53:57 2006 -0500
+++ b/tools/python/xen/xend/server/blkif.py     Wed Aug 30 14:09:31 2006 -0500
@@ -52,10 +52,18 @@ class BlkifController(DevController):
         except ValueError:
             dev_type = "disk"
 
-        try:
-            (typ, params) = string.split(uname, ':', 1)
-        except ValueError:
-            (typ, params) = ("", "")
+        if uname is None:
+            if dev_type == 'cdrom':
+                (typ, params) = ("", "")
+            else:
+                raise VmError(
+                    'Block device must have physical details specified')
+        else:
+            try:
+                (typ, params) = string.split(uname, ':', 1)
+            except ValueError:
+                (typ, params) = ("", "")
+
         back = { 'dev'    : dev,
                  'type'   : typ,
                  'params' : params,
diff -r 18ce855ff594 -r e01441c9a607 tools/python/xen/xm/migrate.py
--- a/tools/python/xen/xm/migrate.py    Tue Aug 29 17:53:57 2006 -0500
+++ b/tools/python/xen/xm/migrate.py    Wed Aug 30 14:09:31 2006 -0500
@@ -57,7 +57,8 @@ def main(argv):
         opts.usage()
         return
     if len(args) != 2:
-        opts.err('Invalid arguments: ' + str(args))
+        opts.usage()
+        sys.exit(1)
     dom = args[0]
     dst = args[1]
     server.xend.domain.migrate(dom, dst, opts.vals.live, opts.vals.resource, opts.vals.port)
diff -r 18ce855ff594 -r e01441c9a607 tools/python/xen/xm/shutdown.py
--- a/tools/python/xen/xm/shutdown.py   Tue Aug 29 17:53:57 2006 -0500
+++ b/tools/python/xen/xm/shutdown.py   Wed Aug 30 14:09:31 2006 -0500
@@ -48,21 +48,48 @@ gopts.opt('reboot', short='R',
           fn=set_true, default=0,
           use='Shutdown and reboot.')
 
+def wait_reboot(opts, doms, rcs):
+    while doms:
+        alive = server.xend.domains(0)
+        reboot = []
+        for d in doms:
+            if d in alive:
+                rc = server.xend.domain.getRestartCount(d)
+                if rc == rcs[d]: continue
+                reboot.append(d)
+            else:
+                opts.info("Domain %s destroyed instead of rebooting" % d)
+                doms.remove(d)
+        for d in reboot:
+            opts.info("Domain %s rebooted" % d)
+            doms.remove(d)
+        time.sleep(1)
+    opts.info("All domains rebooted")
+
+def wait_shutdown(opts, doms):
+    while doms:
+        alive = server.xend.domains(0)
+        dead = []
+        for d in doms:
+            if d in alive: continue
+            dead.append(d)
+        for d in dead:
+            opts.info("Domain %s terminated" % d)
+            doms.remove(d)
+        time.sleep(1)
+    opts.info("All domains terminated")
+
 def shutdown(opts, doms, mode, wait):
+    rcs = {}
     for d in doms:
+        rcs[d] = server.xend.domain.getRestartCount(d)
         server.xend.domain.shutdown(d, mode)
+
     if wait:
-        while doms:
-            alive = server.xend.domains(0)
-            dead = []
-            for d in doms:
-                if d in alive: continue
-                dead.append(d)
-            for d in dead:
-                opts.info("Domain %s terminated" % d)
-                doms.remove(d)
-            time.sleep(1)
-        opts.info("All domains terminated")
+        if mode == 'reboot':
+            wait_reboot(opts, doms, rcs)
+        else:
+            wait_shutdown(opts, doms)
 
 def shutdown_mode(opts):
     if opts.vals.halt and opts.vals.reboot:
diff -r 18ce855ff594 -r e01441c9a607 tools/python/xen/xend/FlatDeviceTree.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/xend/FlatDeviceTree.py   Wed Aug 30 14:09:31 2006 -0500
@@ -0,0 +1,323 @@
+#!/usr/bin/env python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#
+# Copyright (C) IBM Corp. 2006
+#
+# Authors: Hollis Blanchard <hollisb@xxxxxxxxxx>
+
+import os
+import sys
+import struct
+import stat
+import re
+
+_OF_DT_HEADER = int("d00dfeed", 16) # avoid signed/unsigned FutureWarning
+_OF_DT_BEGIN_NODE = 0x1
+_OF_DT_END_NODE = 0x2
+_OF_DT_PROP = 0x3
+_OF_DT_END = 0x9
+
+def _bincat(seq, separator=''):
+    '''Concatenate the contents of seq into a bytestream.'''
+    strs = []
+    for item in seq:
+        if type(item) == type(0):
+            strs.append(struct.pack(">I", item))
+        else:
+            try:
+                strs.append(item.to_bin())
+            except AttributeError, e:
+                strs.append(item)
+    return separator.join(strs)
+
+def _alignup(val, alignment):
+    return (val + alignment - 1) & ~(alignment - 1)
+
+def _pad(buf, alignment):
+    '''Pad bytestream with NULLs to specified alignment.'''
+    padlen = _alignup(len(buf), alignment)
+    return buf + '\0' * (padlen - len(buf))
+    # not present in Python 2.3:
+    #return buf.ljust(_padlen, '\0')
+
+def _indent(item):
+    indented = []
+    for line in str(item).splitlines(True):
+        indented.append('    ' + line)
+    return ''.join(indented)
+
+class _Property:
+    _nonprint = re.compile('[\000-\037\200-\377]')
+    def __init__(self, node, name, value):
+        self.node = node
+        self.value = value
+        self.name = name
+        self.node.tree.stradd(name)
+
+    def __str__(self):
+        result = self.name
+        if self.value:
+            searchtext = self.value
+            # it's ok for a string to end in NULL
+            if searchtext.find('\000') == len(searchtext)-1:
+                searchtext = searchtext[:-1]
+            m = self._nonprint.search(searchtext)
+            if m:
+                bytes = struct.unpack("B" * len(self.value), self.value)
+                hexbytes = [ '%02x' % b for b in bytes ]
+                words = []
+                for i in range(0, len(self.value), 4):
+                    words.append(''.join(hexbytes[i:i+4]))
+                v = '<' + ' '.join(words) + '>'
+            else:
+                v = '"%s"' % self.value
+            result += ': ' + v
+        return result
+
+    def to_bin(self):
+        offset = self.node.tree.stroffset(self.name)
+        return struct.pack('>III', _OF_DT_PROP, len(self.value), offset) \
+            + _pad(self.value, 4)
+
+class _Node:
+    def __init__(self, tree, name):
+        self.tree = tree
+        self.name = name
+        self.props = {}
+        self.children = {}
+        self.phandle = 0
+
+    def __str__(self):
+        propstrs = [ _indent(prop) for prop in self.props.values() ]
+        childstrs = [ _indent(child) for child in self.children.values() ]
+        return '%s:\n%s\n%s' % (self.name, '\n'.join(propstrs),
+            '\n'.join(childstrs))
+
+    def to_bin(self):
+        name = _pad(self.name + '\0', 4)
+        return struct.pack('>I', _OF_DT_BEGIN_NODE) + \
+                name + \
+                _bincat(self.props.values()) + \
+                _bincat(self.children.values()) + \
+                struct.pack('>I', _OF_DT_END_NODE)
+
+    def addprop(self, propname, *cells):
+        '''setprop with duplicate error-checking.'''
+        if propname in self.props:
+            raise AttributeError('%s/%s already exists' % (self.name, propname))
+        self.setprop(propname, *cells)
+
+    def setprop(self, propname, *cells):
+        self.props[propname] = _Property(self, propname, _bincat(cells))
+
+    def addnode(self, nodename):
+        '''newnode with duplicate error-checking.'''
+        if nodename in self.children:
+            raise AttributeError('%s/%s already exists' % (self.name, nodename))
+        return self.newnode(nodename)
+
+    def newnode(self, nodename):
+        node = _Node(self.tree, nodename)
+        self.children[nodename] = node
+        return node
+
+    def getprop(self, propname):
+        return self.props[propname]
+
+    def getchild(self, nodename):
+        return self.children[nodename]
+
+    def get_phandle(self):
+        if self.phandle:
+            return self.phandle
+        self.phandle = self.tree.alloc_phandle()
+        self.addprop('linux,phandle', self.phandle)
+        return self.phandle
+
+class _Header:
+    def __init__(self):
+        self.magic = 0
+        self.totalsize = 0
+        self.off_dt_struct = 0
+        self.off_dt_strings = 0
+        self.off_mem_rsvmap = 0
+        self.version = 0
+        self.last_comp_version = 0
+        self.boot_cpuid_phys = 0
+        self.size_dt_strings = 0
+    def to_bin(self):
+        return struct.pack('>9I',
+            self.magic,
+            self.totalsize,
+            self.off_dt_struct,
+            self.off_dt_strings,
+            self.off_mem_rsvmap,
+            self.version,
+            self.last_comp_version,
+            self.boot_cpuid_phys,
+            self.size_dt_strings)
+
+class _StringBlock:
+    def __init__(self):
+        self.table = []
+    def to_bin(self):
+        return _bincat(self.table, '\0') + '\0'
+    def add(self, str):
+        self.table.append(str)
+    def getoffset(self, str):
+        return self.to_bin().index(str + '\0')
+
+class Tree(_Node):
+    def __init__(self):
+        self.last_phandle = 0
+        self.strings = _StringBlock()
+        self.reserved = [(0, 0)]
+        _Node.__init__(self, self, '\0')
+
+    def alloc_phandle(self):
+        self.last_phandle += 1
+        return self.last_phandle
+
+    def stradd(self, str):
+        return self.strings.add(str)
+
+    def stroffset(self, str):
+        return self.strings.getoffset(str)
+
+    def reserve(self, start, len):
+        self.reserved.insert(0, (start, len))
+
+    def to_bin(self):
+        # layout:
+        #   header
+        #   reservation map
+        #   string block
+        #   data block
+
+        datablock = _Node.to_bin(self)
+
+        r = [ struct.pack('>QQ', rsrv[0], rsrv[1]) for rsrv in self.reserved ]
+        reserved = _bincat(r)
+
+        strblock = _pad(self.strings.to_bin(), 4)
+        strblocklen = len(strblock)
+
+        header = _Header()
+        header.magic = _OF_DT_HEADER
+        header.off_mem_rsvmap = _alignup(len(header.to_bin()), 8)
+        header.off_dt_strings = header.off_mem_rsvmap + len(reserved)
+        header.off_dt_struct = header.off_dt_strings + strblocklen
+        header.version = 0x10
+        header.last_comp_version = 0x10
+        header.boot_cpuid_phys = 0
+        header.size_dt_strings = strblocklen
+
+        payload = reserved + \
+                strblock + \
+                datablock + \
+                struct.pack('>I', _OF_DT_END)
+        header.totalsize = len(payload) + _alignup(len(header.to_bin()), 8)
+        return _pad(header.to_bin(), 8) + payload
+
+_host_devtree_root = '/proc/device-tree'
+def _getprop(propname):
+    '''Extract a property from the system's device tree.'''
+    f = file(os.path.join(_host_devtree_root, propname), 'r')
+    data = f.read()
+    f.close()
+    return data
+
+def _copynode(node, dirpath, propfilter):
+    '''Extract all properties from a node in the system's device tree.'''
+    dirents = os.listdir(dirpath)
+    for dirent in dirents:
+        fullpath = os.path.join(dirpath, dirent)
+        st = os.lstat(fullpath)
+        if stat.S_ISDIR(st.st_mode):
+            child = node.addnode(dirent)
+            _copytree(child, fullpath, propfilter)
+        elif stat.S_ISREG(st.st_mode) and propfilter(fullpath):
+            node.addprop(dirent, _getprop(fullpath))
+
+def _copytree(node, dirpath, propfilter):
+    path = os.path.join(_host_devtree_root, dirpath)
+    _copynode(node, path, propfilter)
+
+def build(imghandler):
+    '''Construct a device tree by combining the domain's configuration and
+    the host's device tree.'''
+    root = Tree()
+
+    # 4 pages: start_info, console, store, shared_info
+    root.reserve(0x3ffc000, 0x4000)
+
+    root.addprop('device_type', 'chrp-but-not-really\0')
+    root.addprop('#size-cells', 2)
+    root.addprop('#address-cells', 2)
+    root.addprop('model', 'Momentum,Maple-D\0')
+    root.addprop('compatible', 'Momentum,Maple\0')
+
+    xen = root.addnode('xen')
+    xen.addprop('start-info', 0, 0x3ffc000, 0, 0x1000)
+    xen.addprop('version', 'Xen-3.0-unstable\0')
+    xen.addprop('reg', 0, imghandler.vm.domid, 0, 0)
+    xen.addprop('domain-name', imghandler.vm.getName() + '\0')
+    xencons = xen.addnode('console')
+    xencons.addprop('interrupts', 1, 0)
+
+    # XXX split out RMA node
+    mem = root.addnode('memory@0')
+    totalmem = imghandler.vm.getMemoryTarget() * 1024
+    mem.addprop('reg', 0, 0, 0, totalmem)
+    mem.addprop('device_type', 'memory\0')
+
+    cpus = root.addnode('cpus')
+    cpus.addprop('smp-enabled')
+    cpus.addprop('#size-cells', 0)
+    cpus.addprop('#address-cells', 1)
+
+    # Copy all properties the system firmware gave us, except for 'linux,'
+    # properties, from 'cpus/@0', once for every vcpu. Hopefully all cpus are
+    # identical...
+    cpu0 = None
+    def _nolinuxprops(fullpath):
+        return not os.path.basename(fullpath).startswith('linux,')
+    for i in range(imghandler.vm.getVCpuCount()):
+        cpu = cpus.addnode('PowerPC,970@0')
+        _copytree(cpu, 'cpus/PowerPC,970@0', _nolinuxprops)
+        # and then overwrite what we need to
+        pft_size = imghandler.vm.info.get('pft-size', 0x14)
+        cpu.setprop('ibm,pft-size', 0, pft_size)
+
+        # set default CPU
+        if cpu0 == None:
+            cpu0 = cpu
+
+    chosen = root.addnode('chosen')
+    chosen.addprop('cpu', cpu0.get_phandle())
+    chosen.addprop('memory', mem.get_phandle())
+    chosen.addprop('linux,stdout-path', '/xen/console\0')
+    chosen.addprop('interrupt-controller', xen.get_phandle())
+    chosen.addprop('bootargs', imghandler.cmdline + '\0')
+    # xc_linux_load.c will overwrite these 64-bit properties later
+    chosen.addprop('linux,initrd-start', 0, 0)
+    chosen.addprop('linux,initrd-end', 0, 0)
+
+    if 1:
+        f = file('/tmp/domU.dtb', 'w')
+        f.write(root.to_bin())
+        f.close()
+
+    return root
diff -r 18ce855ff594 -r e01441c9a607 tools/python/xen/xend/arch.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/xend/arch.py     Wed Aug 30 14:09:31 2006 -0500
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#
+# Copyright (C) IBM Corp. 2006
+#
+# Authors: Hollis Blanchard <hollisb@xxxxxxxxxx>
+
+import os
+
+_types = {
+    "i386": "x86",
+    "i486": "x86",
+    "i586": "x86",
+    "i686": "x86",
+    "x86_64": "x86",
+    "ia64": "ia64",
+    "ppc": "powerpc",
+    "ppc64": "powerpc",
+}
+type = _types.get(os.uname()[4], "unknown")

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

