[Xen-changelog] [xen-unstable] Direct Linux boot: XenD changes for HVM direct Linux boot.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1201864695 0
# Node ID 396ab902b02daf14deed0f3261c1c179b7378b4d
# Parent  67ca9c37ef02adedf0af62193159cb515eed28b7
Direct Linux boot: XenD changes for HVM direct Linux boot.

This patch provides the tools support for direct kernel boot of
HVM guests. Currently the config files in /etc/xen support the
params 'kernel', 'ramdisk' and 'extra'. For PV guests these have
the obvious meaning: they name the kernel image, the initial
ramdisk, and the extra kernel command line arguments respectively.
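
For example, a PV config typically contains entries along these
lines (paths purely illustrative):

    kernel  = "/boot/vmlinuz-2.6.18-xen"
    ramdisk = "/boot/initrd-2.6.18-xen.img"
    extra   = "root=/dev/xvda1 console=xvc0"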

Unfortunately, HVM guest configs hijacked the 'kernel' parameter,
using it to refer to the path of the HVM firmware. So, this
patch adds a new config file parameter called 'loader' which
refers to the HVM firmware instead.
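
With this patch an HVM direct boot config can name both the
firmware and the guest kernel, e.g. (paths illustrative):

    loader  = "/usr/lib/xen/boot/hvmloader"
    kernel  = "/boot/vmlinuz-2.6.18"
    ramdisk = "/boot/initrd-2.6.18.img"
    extra   = "root=/dev/hda1"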

The conventions for loading an initrd image say that it should
be placed at the end of memory. This means QEMU needs to know
the size of the guest's initial RAM allocation, so image.py is
changed to pass the '-m' flag to QEMU.
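
For instance, a guest with a 512 MB allocation has an initial
reservation of 524288 KiB, so parseDeviceModelArgs() appends the
following to the device model's argument list (value illustrative;
the reservation is tracked in KiB, hence the division):

    ret += ["-m", str(524288 / 1024)]    # i.e. ["-m", "512"]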

The HVMImageHandler class in image.py is changed so that if the
'kernel', 'ramdisk' or 'extra' params are given in the config,
they are passed to QEMU via the '-kernel', '-initrd' and
'-append' flags respectively.
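
Taken together, the example config above would add arguments along
these lines to the qemu-dm invocation (paths illustrative):

    ["-kernel", "/boot/vmlinuz-2.6.18",
     "-initrd", "/boot/initrd-2.6.18.img",
     "-append", "root=/dev/hda1"]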

Finally, the 'loader' param is used as the arg to 'xc_hvm_build'
instead of the old 'kernel' param.

For the sake of compatibility with old HVM guest config files,
if the config file has a 'kernel' param whose path matches that
of the HVM firmware, then we automatically convert this 'kernel'
param into the 'loader' param. This ensures existing HVM guests
keep working without requiring any changes.
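
So an existing config of this shape (the hvmloader path being the
one the patch checks for) keeps working unmodified:

    builder = "hvm"
    kernel  = "/usr/lib/xen/boot/hvmloader"   # auto-converted to 'loader'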

Signed-off-by: Daniel P. Berrange <berrange@xxxxxxxxxx>
---
 tools/python/xen/xend/XendConfig.py |   13 ++++++++++++-
 tools/python/xen/xend/image.py      |   24 +++++++++++++++++++-----
 tools/python/xen/xm/create.py       |    6 ++++++
 3 files changed, 37 insertions(+), 6 deletions(-)

diff -r 67ca9c37ef02 -r 396ab902b02d tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py       Fri Feb 01 11:16:37 2008 +0000
+++ b/tools/python/xen/xend/XendConfig.py       Fri Feb 01 11:18:15 2008 +0000
@@ -124,7 +124,7 @@ LEGACY_CFG_TO_XENAPI_CFG = reverse_dict(
 LEGACY_CFG_TO_XENAPI_CFG = reverse_dict(XENAPI_CFG_TO_LEGACY_CFG)
 
 # Platform configuration keys.
-XENAPI_PLATFORM_CFG = [ 'acpi', 'apic', 'boot', 'device_model', 'display', 
+XENAPI_PLATFORM_CFG = [ 'acpi', 'apic', 'boot', 'device_model', 'loader', 'display', 
                         'fda', 'fdb', 'keymap', 'isa', 'localtime', 'monitor', 
                         'nographic', 'pae', 'rtc_timeoffset', 'serial', 'sdl',
                         'soundhw','stdvga', 'usb', 'usbdevice', 'vnc',
@@ -404,6 +404,17 @@ class XendConfig(dict):
                 self['platform']['device_model'] = xen.util.auxbin.pathTo("qemu-dm")
 
         if self.is_hvm():
+            if 'loader' not in self['platform']:
+                log.debug("No loader present")
+                # Old configs may have hvmloader set as the PV_kernel param,
+                # so let's migrate them.
+                if self['PV_kernel'] == "/usr/lib/xen/boot/hvmloader":
+                    self['platform']['loader'] = self['PV_kernel']
+                    log.debug("Loader copied from kernel %s" % 
str(self['platform']['loader']))
+                else:
+                    self['platform']['loader'] = "/usr/lib/xen/boot/hvmloader"
+                    log.debug("Loader %s" % str(self['platform']['loader']))
+
             # Compatibility hack, can go away soon.
             if 'soundhw' not in self['platform'] and \
                self['platform'].get('enable_audio'):
diff -r 67ca9c37ef02 -r 396ab902b02d tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Fri Feb 01 11:16:37 2008 +0000
+++ b/tools/python/xen/xend/image.py    Fri Feb 01 11:18:15 2008 +0000
@@ -127,7 +127,7 @@ class ImageHandler:
         """
         # Set params and call buildDomain().
 
-        if not os.path.isfile(self.kernel):
+        if self.kernel and not os.path.isfile(self.kernel):
             raise VmError('Kernel image does not exist: %s' % self.kernel)
         if self.ramdisk and not os.path.isfile(self.ramdisk):
             raise VmError('Kernel ramdisk does not exist: %s' % self.ramdisk)
@@ -186,6 +186,10 @@ class ImageHandler:
     # xm config file
     def parseDeviceModelArgs(self, vmConfig):
         ret = ["-domain-name", str(self.vm.info['name_label'])]
+
+        # Tell QEMU how large the guest's memory allocation is
+        # to help it when loading the initrd (if necessary)
+        ret += ["-m", str(self.getRequiredInitialReservation() / 1024)]
 
         # Find RFB console device, and if it exists, make QEMU enable
         # the VNC console.
@@ -420,8 +424,7 @@ class HVMImageHandler(ImageHandler):
     def configure(self, vmConfig):
         ImageHandler.configure(self, vmConfig)
 
-        if not self.kernel:
-            self.kernel = '/usr/lib/xen/boot/hvmloader'
+        self.loader = vmConfig['platform'].get('loader')
 
         info = xc.xeninfo()
         if 'hvm' not in info['xen_caps']:
@@ -445,6 +448,17 @@ class HVMImageHandler(ImageHandler):
     def parseDeviceModelArgs(self, vmConfig):
         ret = ImageHandler.parseDeviceModelArgs(self, vmConfig)
         ret = ret + ['-vcpus', str(self.vm.getVCpuCount())]
+
+        if self.kernel and self.kernel != "/usr/lib/xen/boot/hvmloader":
+            log.debug("kernel         = %s", self.kernel)
+            ret = ret + ['-kernel', self.kernel]
+        if self.ramdisk:
+            log.debug("ramdisk        = %s", self.ramdisk)
+            ret = ret + ['-initrd', self.ramdisk]
+        if self.cmdline:
+            log.debug("cmdline        = %s", self.cmdline)
+            ret = ret + ['-append', self.cmdline]
+
 
         dmargs = [ 'boot', 'fda', 'fdb', 'soundhw',
                    'localtime', 'serial', 'stdvga', 'isa',
@@ -521,7 +535,7 @@ class HVMImageHandler(ImageHandler):
         mem_mb = self.getRequiredInitialReservation() / 1024
 
         log.debug("domid          = %d", self.vm.getDomid())
-        log.debug("image          = %s", self.kernel)
+        log.debug("image          = %s", self.loader)
         log.debug("store_evtchn   = %d", store_evtchn)
         log.debug("memsize        = %d", mem_mb)
         log.debug("vcpus          = %d", self.vm.getVCpuCount())
@@ -529,7 +543,7 @@ class HVMImageHandler(ImageHandler):
         log.debug("apic           = %d", self.apic)
 
         rc = xc.hvm_build(domid          = self.vm.getDomid(),
-                          image          = self.kernel,
+                          image          = self.loader,
                           memsize        = mem_mb,
                           vcpus          = self.vm.getVCpuCount(),
                           acpi           = self.acpi,
diff -r 67ca9c37ef02 -r 396ab902b02d tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py     Fri Feb 01 11:16:37 2008 +0000
+++ b/tools/python/xen/xm/create.py     Fri Feb 01 11:18:15 2008 +0000
@@ -158,6 +158,10 @@ gopts.var('ramdisk', val='FILE',
           fn=set_value, default='',
           use="Path to ramdisk.")
 
+gopts.var('loader', val='FILE',
+          fn=set_value, default='',
+          use="Path to HVM firmware.")
+
 gopts.var('features', val='FEATURES',
           fn=set_value, default='',
           use="Features to enable in guest kernel")
@@ -561,6 +565,8 @@ def configure_image(vals):
         config_image.append([ 'kernel', os.path.abspath(vals.kernel) ])
     if vals.ramdisk:
         config_image.append([ 'ramdisk', os.path.abspath(vals.ramdisk) ])
+    if vals.loader:
+        config_image.append([ 'loader', os.path.abspath(vals.loader) ])
     if vals.cmdline_ip:
         cmdline_ip = strip('ip=', vals.cmdline_ip)
         config_image.append(['ip', cmdline_ip])

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

