
[Xen-changelog] Merged.



# HG changeset patch
# User emellor@xxxxxxxxxxxxxxxxxxxxxx
# Node ID c668b024b2c71a75494d2221433405629f9d7596
# Parent  b7802a60b09fe181a36e1a32484fe434d9b38e98
# Parent  066a233ee3659fb1e52a4bca05727e5cdd12004a
Merged.

diff -r b7802a60b09f -r c668b024b2c7 Config.mk
--- a/Config.mk Mon Apr 10 15:28:52 2006
+++ b/Config.mk Mon Apr 10 15:36:03 2006
@@ -24,6 +24,7 @@
 OBJDUMP    = $(CROSS_COMPILE)objdump
 
 DISTDIR     ?= $(XEN_ROOT)/dist
+DESTDIR     ?= /
 
 INSTALL      = install
 INSTALL_DIR  = $(INSTALL) -d -m0755
diff -r b7802a60b09f -r c668b024b2c7 Makefile
--- a/Makefile  Mon Apr 10 15:28:52 2006
+++ b/Makefile  Mon Apr 10 15:36:03 2006
@@ -115,18 +115,6 @@
 # Linux name for GNU distclean
 mrproper: distclean
 
-install-logging: LOGGING=logging-0.4.9.2
-install-logging:
-       [ -f $(LOGGING).tar.gz ] || wget http://www.red-dove.com/$(LOGGING).tar.gz
-       tar -zxf $(LOGGING).tar.gz
-       cd $(LOGGING) && python setup.py install
-
-# handy target to upgrade iptables (use rpm or apt-get in preference)
-install-iptables:
-       wget http://www.netfilter.org/files/iptables-1.2.11.tar.bz2
-       tar -jxf iptables-1.2.11.tar.bz2
-       $(MAKE) -C iptables-1.2.11 PREFIX= KERNEL_DIR=../linux-$(LINUX_VER)-xen0 install
-
 help:
        @echo 'Installation targets:'
        @echo '  install          - build and install everything'
@@ -147,23 +135,25 @@
        @echo '  dev-docs         - build developer-only documentation'
        @echo ''
        @echo 'Cleaning targets:'
-       @echo '  clean            - clean the Xen, tools and docs (but not'
-       @echo '                     guest kernel) trees'
-       @echo '  distclean        - clean plus delete kernel tarballs and kernel'
-       @echo '                     build trees'
+       @echo '  clean            - clean the Xen, tools and docs (but not guest kernel trees)'
+       @echo '  distclean        - clean plus delete kernel build trees and'
+       @echo '                     local downloaded files'
        @echo '  kdelete          - delete guest kernel build trees'
        @echo '  kclean           - clean guest kernel build trees'
-       @echo ''
-       @echo 'Dependency installation targets:'
-       @echo '  install-logging  - install the Python Logging package'
-       @echo '  install-iptables - install iptables tools'
        @echo ''
        @echo 'Miscellaneous targets:'
        @echo '  prep-kernels     - prepares kernel directories, does not build'
        @echo '  mkpatches        - make patches against vanilla kernels from'
        @echo '                     sparse trees'
-       @echo '  uninstall        - attempt to remove installed Xen tools (use'
-       @echo '                     with extreme care!)'
+       @echo '  uninstall        - attempt to remove installed Xen tools'
+       @echo '                     (use with extreme care!)'
+       @echo
+       @echo 'Environment:'
+       @echo '  XEN_PYTHON_NATIVE_INSTALL=y'
+       @echo '                   - native python install or dist'
+       @echo '                     install into prefix/lib/python<VERSION>'
+       @echo '                     instead of <PREFIX>/lib/python'
+       @echo '                     true if set to non-empty value, false otherwise'
 
 # Use this target with extreme care!
 uninstall: D=$(DESTDIR)
diff -r b7802a60b09f -r c668b024b2c7 buildconfigs/Rules.mk
--- a/buildconfigs/Rules.mk     Mon Apr 10 15:28:52 2006
+++ b/buildconfigs/Rules.mk     Mon Apr 10 15:36:03 2006
@@ -39,29 +39,6 @@
 patch-%.bz2:
        @echo "Cannot find $(@F) in path $(LINUX_SRC_PATH)"
        wget $(KERNEL_REPO)/pub/linux/kernel/v$(_LINUX_VDIR)/$(_LINUX_XDIR)/$(@F) -O./$@
-
-# Expand NetBSD release to NetBSD version
-NETBSD_RELEASE  ?= 2.0
-NETBSD_VER      ?= $(patsubst netbsd-%-xen-sparse,%,$(wildcard netbsd-$(NETBSD_RELEASE)*-xen-sparse))
-NETBSD_CVSSNAP  ?= 20050309
-
-# Setup NetBSD search path
-NETBSD_SRC_PATH        ?= .:..
-vpath netbsd-%.tar.bz2 $(NETBSD_SRC_PATH)
-
-# download a pristine NetBSD tarball if there isn't one in NETBSD_SRC_PATH
-netbsd-%-xen-kernel-$(NETBSD_CVSSNAP).tar.bz2:
-       @echo "Cannot find $@ in path $(NETBSD_SRC_PATH)"
-       wget http://www.cl.cam.ac.uk/Research/SRG/netos/xen/downloads/$@ -O./$@
-
-netbsd-%.tar.bz2: netbsd-%-xen-kernel-$(NETBSD_CVSSNAP).tar.bz2
-       ln -fs $< $@
-
-ifeq ($(OS),linux)
-OS_VER = $(LINUX_VER)
-else
-OS_VER = $(NETBSD_VER)
-endif
 
 pristine-%: pristine-%/.valid-pristine
        @true
@@ -124,26 +101,19 @@
        rm -rf tmp-$@
        cp -al $(<D) tmp-$@
        ( cd linux-2.6-xen-sparse && ./mkbuildtree ../tmp-$@ )  
-       diff -Nurp $(<D) tmp-$@ > $@ || true
+       diff -Nurp $(patsubst ref%,pristine%,$(<D)) tmp-$@ > $@ || true
        rm -rf tmp-$@
 
 %-xen.patch: ref-%/.valid-ref
        rm -rf tmp-$@
        cp -al $(<D) tmp-$@
        ( cd $*-xen-sparse && ./mkbuildtree ../tmp-$@ ) 
-       diff -Nurp $(<D) tmp-$@ > $@ || true
+       diff -Nurp $(patsubst ref%,pristine%,$(<D)) tmp-$@ > $@ || true
        rm -rf tmp-$@
 
-%-mrproper: %-mrproper-extra
+%-mrproper:
        rm -rf pristine-$(*)* ref-$(*)* $*.tar.bz2
        rm -rf $*-xen.patch
-
-netbsd-%-mrproper-extra:
-       rm -rf netbsd-$*-tools netbsd-$*-tools.tar.bz2
-       rm -f netbsd-$*-xen-kernel-$(NETBSD_CVSSNAP).tar.bz2
-
-%-mrproper-extra:
-       @: # do nothing
 
 config-update-pae:
 ifeq ($(XEN_TARGET_X86_PAE),y)
diff -r b7802a60b09f -r c668b024b2c7 buildconfigs/linux-defconfig_xen0_x86_32
--- a/buildconfigs/linux-defconfig_xen0_x86_32  Mon Apr 10 15:28:52 2006
+++ b/buildconfigs/linux-defconfig_xen0_x86_32  Mon Apr 10 15:36:03 2006
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.16-rc3-xen0
-# Thu Feb 16 22:52:42 2006
+# Linux kernel version: 2.6.16-xen0
+# Sat Apr  8 11:34:07 2006
 #
 CONFIG_X86_32=y
 CONFIG_SEMAPHORE_SLEEPERS=y
@@ -208,7 +208,6 @@
 CONFIG_ACPI_EC=y
 CONFIG_ACPI_POWER=y
 CONFIG_ACPI_SYSTEM=y
-# CONFIG_X86_PM_TIMER is not set
 # CONFIG_ACPI_CONTAINER is not set
 
 #
@@ -392,7 +391,13 @@
 #
 # Plug and Play support
 #
-# CONFIG_PNP is not set
+CONFIG_PNP=y
+CONFIG_PNP_DEBUG=y
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
 
 #
 # Block devices
@@ -440,6 +445,7 @@
 #
 CONFIG_IDE_GENERIC=y
 # CONFIG_BLK_DEV_CMD640 is not set
+# CONFIG_BLK_DEV_IDEPNP is not set
 CONFIG_BLK_DEV_IDEPCI=y
 # CONFIG_IDEPCI_SHARE_IRQ is not set
 # CONFIG_BLK_DEV_OFFBOARD is not set
@@ -623,6 +629,7 @@
 # CONFIG_BONDING is not set
 # CONFIG_EQUALIZER is not set
 CONFIG_TUN=y
+# CONFIG_NET_SB1000 is not set
 
 #
 # ARCnet devices
@@ -1064,11 +1071,7 @@
 # CONFIG_INFINIBAND is not set
 
 #
-# SN Devices
-#
-
-#
-# EDAC - error detection and reporting (RAS)
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
 #
 # CONFIG_EDAC is not set
 
@@ -1306,7 +1309,7 @@
 #
 # CONFIG_CRYPTO_DEV_PADLOCK is not set
 CONFIG_XEN=y
-CONFIG_NO_IDLE_HZ=y
+CONFIG_XEN_INTERFACE_VERSION=0x00030101
 
 #
 # XEN
@@ -1332,6 +1335,7 @@
 CONFIG_XEN_SYSFS=y
 CONFIG_HAVE_ARCH_ALLOC_SKB=y
 CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+CONFIG_NO_IDLE_HZ=y
 
 #
 # Library routines
@@ -1344,4 +1348,6 @@
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_X86_BIOS_REBOOT=y
+CONFIG_X86_NO_TSS=y
+CONFIG_X86_NO_IDT=y
 CONFIG_KTIME_SCALAR=y
diff -r b7802a60b09f -r c668b024b2c7 buildconfigs/linux-defconfig_xen0_x86_64
--- a/buildconfigs/linux-defconfig_xen0_x86_64  Mon Apr 10 15:28:52 2006
+++ b/buildconfigs/linux-defconfig_xen0_x86_64  Mon Apr 10 15:36:03 2006
@@ -327,7 +327,13 @@
 #
 # Plug and Play support
 #
-# CONFIG_PNP is not set
+CONFIG_PNP=y
+CONFIG_PNP_DEBUG=y
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
 
 #
 # Block devices
@@ -375,6 +381,7 @@
 #
 CONFIG_IDE_GENERIC=y
 # CONFIG_BLK_DEV_CMD640 is not set
+# CONFIG_BLK_DEV_IDEPNP is not set
 CONFIG_BLK_DEV_IDEPCI=y
 # CONFIG_IDEPCI_SHARE_IRQ is not set
 # CONFIG_BLK_DEV_OFFBOARD is not set
@@ -559,6 +566,7 @@
 # CONFIG_BONDING is not set
 # CONFIG_EQUALIZER is not set
 CONFIG_TUN=y
+# CONFIG_NET_SB1000 is not set
 
 #
 # ARCnet devices
diff -r b7802a60b09f -r c668b024b2c7 buildconfigs/linux-defconfig_xen_x86_32
--- a/buildconfigs/linux-defconfig_xen_x86_32   Mon Apr 10 15:28:52 2006
+++ b/buildconfigs/linux-defconfig_xen_x86_32   Mon Apr 10 15:36:03 2006
@@ -912,12 +912,12 @@
 # Plug and Play support
 #
 CONFIG_PNP=y
-# CONFIG_PNP_DEBUG is not set
+CONFIG_PNP_DEBUG=y
 
 #
 # Protocols
 #
-# CONFIG_PNPACPI is not set
+CONFIG_PNPACPI=y
 
 #
 # Block devices
diff -r b7802a60b09f -r c668b024b2c7 buildconfigs/linux-defconfig_xen_x86_64
--- a/buildconfigs/linux-defconfig_xen_x86_64   Mon Apr 10 15:28:52 2006
+++ b/buildconfigs/linux-defconfig_xen_x86_64   Mon Apr 10 15:36:03 2006
@@ -776,7 +776,13 @@
 #
 # Plug and Play support
 #
-# CONFIG_PNP is not set
+CONFIG_PNP=y
+CONFIG_PNP_DEBUG=y
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
 
 #
 # Block devices
@@ -857,6 +863,7 @@
 CONFIG_IDE_GENERIC=y
 CONFIG_BLK_DEV_CMD640=y
 CONFIG_BLK_DEV_CMD640_ENHANCED=y
+CONFIG_BLK_DEV_IDEPNP=y
 CONFIG_BLK_DEV_IDEPCI=y
 CONFIG_IDEPCI_SHARE_IRQ=y
 # CONFIG_BLK_DEV_OFFBOARD is not set
@@ -1088,6 +1095,7 @@
 CONFIG_BONDING=m
 CONFIG_EQUALIZER=m
 CONFIG_TUN=m
+CONFIG_NET_SB1000=m
 
 #
 # ARCnet devices
diff -r b7802a60b09f -r c668b024b2c7 buildconfigs/mk.linux-2.6-xen
--- a/buildconfigs/mk.linux-2.6-xen     Mon Apr 10 15:28:52 2006
+++ b/buildconfigs/mk.linux-2.6-xen     Mon Apr 10 15:36:03 2006
@@ -1,13 +1,10 @@
-
-OS           = linux
-
 LINUX_SERIES = 2.6
 LINUX_VER    = 2.6.16
 LINUX_SRCS = linux-2.6.16.tar.bz2
 
 EXTRAVERSION ?= xen
 
-LINUX_DIR    = $(OS)-$(LINUX_VER)-$(EXTRAVERSION)
+LINUX_DIR    = linux-$(LINUX_VER)-$(EXTRAVERSION)
 
 include buildconfigs/Rules.mk
 
@@ -22,7 +19,7 @@
        $(MAKE) -C $(LINUX_DIR) ARCH=$(LINUX_ARCH) INSTALL_PATH=$(DESTDIR) vmlinuz
        $(MAKE) -C $(LINUX_DIR) ARCH=$(LINUX_ARCH) INSTALL_PATH=$(DESTDIR) install
 
-$(LINUX_DIR)/include/linux/autoconf.h: ref-$(OS)-$(LINUX_VER)/.valid-ref
+$(LINUX_DIR)/include/linux/autoconf.h: ref-linux-$(LINUX_VER)/.valid-ref
        rm -rf $(LINUX_DIR)
        cp -al $(<D) $(LINUX_DIR)
        # Apply arch-xen patches
@@ -52,4 +49,4 @@
        $(MAKE) -C $(LINUX_DIR) ARCH=$(LINUX_ARCH) clean
 
 delete: 
-       rm -rf tmp-$(OS)-$(LINUX_VER) $(LINUX_DIR) 
+       rm -rf tmp-linux-$(LINUX_VER) $(LINUX_DIR) 
diff -r b7802a60b09f -r c668b024b2c7 linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c       Mon Apr 10 15:28:52 2006
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c       Mon Apr 10 15:36:03 2006
@@ -69,7 +69,7 @@
        } else {
                for (i = 0; i < nents; i++ ) {
                        sg[i].dma_address =
-                               page_to_phys(sg[i].page) + sg[i].offset;
+                               page_to_bus(sg[i].page) + sg[i].offset;
                        sg[i].dma_length  = sg[i].length;
                        BUG_ON(!sg[i].page);
                        IOMMU_BUG_ON(address_needs_mapping(
@@ -105,7 +105,7 @@
                dma_addr = swiotlb_map_page(
                        dev, page, offset, size, direction);
        } else {
-               dma_addr = page_to_phys(page) + offset;
+               dma_addr = page_to_bus(page) + offset;
                IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
        }
 
diff -r b7802a60b09f -r c668b024b2c7 linux-2.6-xen-sparse/arch/i386/kernel/setup-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/setup-xen.c Mon Apr 10 15:28:52 2006
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/setup-xen.c Mon Apr 10 15:36:03 2006
@@ -1848,10 +1848,6 @@
                get_smp_config();
 #endif
 
-       /* XXX Disable irqdebug until we have a way to avoid interrupt
-        * conflicts. */
-       noirqdebug_setup("");
-
        register_memory();
 
        if (xen_start_info->flags & SIF_INITDOMAIN) {
diff -r b7802a60b09f -r c668b024b2c7 linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c
--- a/linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c   Mon Apr 10 15:28:52 2006
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c   Mon Apr 10 15:36:03 2006
@@ -32,7 +32,7 @@
 
 #define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
 
-#define SG_ENT_PHYS_ADDRESS(sg)        (page_to_phys((sg)->page) + (sg)->offset)
+#define SG_ENT_PHYS_ADDRESS(sg)        (page_to_bus((sg)->page) + (sg)->offset)
 
 /*
  * Maximum allowable number of contiguous slabs to map,
@@ -607,7 +607,7 @@
        dma_addr_t dev_addr;
        char *map;
 
-       dev_addr = page_to_phys(page) + offset;
+       dev_addr = page_to_bus(page) + offset;
        if (address_needs_mapping(hwdev, dev_addr)) {
                buffer.page   = page;
                buffer.offset = offset;
diff -r b7802a60b09f -r c668b024b2c7 linux-2.6-xen-sparse/drivers/xen/core/reboot.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/reboot.c    Mon Apr 10 15:28:52 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/core/reboot.c    Mon Apr 10 15:36:03 2006
@@ -59,6 +59,10 @@
 {
        /* We really want to get pending console data out before we die. */
        xencons_force_flush();
+#if defined(__i386__) || defined(__x86_64__)
+       if (pm_power_off)
+               pm_power_off();
+#endif
        HYPERVISOR_shutdown(SHUTDOWN_poweroff);
 }
 
diff -r b7802a60b09f -r c668b024b2c7 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/io.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/io.h   Mon Apr 10 15:28:52 2006
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/io.h   Mon Apr 10 15:36:03 2006
@@ -102,6 +102,7 @@
  */
 #define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
 #define page_to_phys(page)      (phys_to_machine(page_to_pseudophys(page)))
+#define page_to_bus(page)       (phys_to_machine(page_to_pseudophys(page)))
 
 #define bio_to_pseudophys(bio)  (page_to_pseudophys(bio_page((bio))) + \
                                  (unsigned long) bio_offset((bio)))
diff -r b7802a60b09f -r c668b024b2c7 linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/io.h
--- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/io.h Mon Apr 10 15:28:52 2006
+++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/io.h Mon Apr 10 15:36:03 2006
@@ -130,6 +130,7 @@
  */
 #define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
 #define page_to_phys(page)      (phys_to_machine(page_to_pseudophys(page)))
+#define page_to_bus(page)       (phys_to_machine(page_to_pseudophys(page)))
 
 #define bio_to_pseudophys(bio)  (page_to_pseudophys(bio_page((bio))) + \
                                  (unsigned long) bio_offset((bio)))
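
The two io.h hunks above add page_to_bus() as a second name for the phys_to_machine(page_to_pseudophys(page)) translation that page_to_phys() already performs, and the pci-dma-xen.c and swiotlb.c hunks compute their DMA addresses with it, making explicit that devices must be programmed with machine (bus) addresses rather than guest-pseudophysical ones. A runnable user-space sketch of that translation chain with stand-in functions; everything suffixed _stub is invented for the example, and the fixed offset merely stands in for the real phys-to-machine table lookup:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t dma_addr_t;

    /* Stand-in for page_to_pseudophys(): pfn -> guest-physical address. */
    static dma_addr_t page_to_pseudophys_stub(unsigned long pfn)
    {
        return (dma_addr_t)pfn << 12;            /* PAGE_SHIFT assumed 12 */
    }

    /* Stand-in for phys_to_machine(): the real code consults the
     * phys-to-machine table; a constant shift fakes it here. */
    static dma_addr_t phys_to_machine_stub(dma_addr_t pseudophys)
    {
        return pseudophys + 0x100000000ull;
    }

    /* page_to_bus(page) == phys_to_machine(page_to_pseudophys(page)) */
    static dma_addr_t page_to_bus_stub(unsigned long pfn)
    {
        return phys_to_machine_stub(page_to_pseudophys_stub(pfn));
    }

    int main(void)
    {
        unsigned long pfn = 42, offset = 0x80;

        /* Mirrors the "page_to_bus(page) + offset" computation in the
         * pci-dma-xen.c hunk above. */
        printf("dma addr = %#llx\n",
               (unsigned long long)(page_to_bus_stub(pfn) + offset));
        return 0;
    }
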
diff -r b7802a60b09f -r c668b024b2c7 tools/ioemu/hw/vga.c
--- a/tools/ioemu/hw/vga.c      Mon Apr 10 15:28:52 2006
+++ b/tools/ioemu/hw/vga.c      Mon Apr 10 15:36:03 2006
@@ -1369,10 +1369,10 @@
 {
     unsigned int eax, edx;
 
-    __asm__("cpuid"
+    __asm__("pushl %%ebx; cpuid; popl %%ebx"
             : "=a" (eax), "=d" (edx)
             : "0" (op)
-            : "bx", "cx");
+            : "cx");
 
     return edx;
 }
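
The vga.c change keeps %ebx intact by saving and restoring it around CPUID instead of listing it as a clobber; on 32-bit PIC builds %ebx holds the GOT pointer, so a "bx" clobber is problematic. A small standalone sketch of the same pattern, assuming a 32-bit x86 build; the cpuid_edx() wrapper name is invented for the example:

    #include <stdio.h>

    /* Same constraint trick as the vga.c hunk above: %ebx is preserved by
     * hand so PIC code keeps its GOT pointer, and only %ecx remains in the
     * clobber list. */
    static unsigned int cpuid_edx(unsigned int op)
    {
        unsigned int eax, edx;
        __asm__("pushl %%ebx; cpuid; popl %%ebx"
                : "=a" (eax), "=d" (edx)
                : "0" (op)
                : "cx");
        return edx;
    }

    int main(void)
    {
        printf("CPUID(1) edx = %08x\n", cpuid_edx(1));
        return 0;
    }
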
diff -r b7802a60b09f -r c668b024b2c7 tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c      Mon Apr 10 15:28:52 2006
+++ b/tools/libxc/xc_linux_build.c      Mon Apr 10 15:36:03 2006
@@ -110,10 +110,10 @@
 
         if ( i == XENFEAT_NR_SUBMAPS*32 )
         {
-            ERROR("Unknown feature \"%.*s\".\n", (int)(p-feats), feats);
+            ERROR("Unknown feature \"%.*s\".", (int)(p-feats), feats);
             if ( req )
             {
-                ERROR("Kernel requires an unknown hypervisor feature.\n");
+                ERROR("Kernel requires an unknown hypervisor feature.");
                 return -EINVAL;
             }
         }
@@ -579,6 +579,31 @@
     return -1;
 }
 #else /* x86 */
+
+/* Check if the platform supports the guest kernel format */
+static int compat_check(int xc_handle, struct domain_setup_info *dsi)
+{
+    xen_capabilities_info_t xen_caps = "";
+
+    if (xc_version(xc_handle, XENVER_capabilities, &xen_caps) != 0) {
+        ERROR("Cannot determine host capabilities.");
+        return 0;
+    }
+
+    if (strstr(xen_caps, "xen-3.0-x86_32p")) {
+        if (!dsi->pae_kernel) {
+            ERROR("Non PAE-kernel on PAE host.");
+            return 0;
+        }
+    } else if (dsi->pae_kernel) {
+        ERROR("PAE-kernel on non-PAE host.");
+        return 0;
+    }
+
+    return 1;
+}
+
+
 static int setup_guest(int xc_handle,
                        uint32_t dom,
                        const char *image, unsigned long image_size,
@@ -635,9 +660,12 @@
 
     if ( (dsi.v_start & (PAGE_SIZE-1)) != 0 )
     {
-        PERROR("Guest OS must load to a page boundary.\n");
-        goto error_out;
-    }
+        PERROR("Guest OS must load to a page boundary.");
+        goto error_out;
+    }
+
+    if (!compat_check(xc_handle, &dsi))
+        goto error_out;
 
     /* Parse and validate kernel features. */
     p = strstr(dsi.xen_guest_string, "FEATURES=");
@@ -647,7 +675,7 @@
                              supported_features,
                              required_features) )
         {
-            ERROR("Failed to parse guest kernel features.\n");
+            ERROR("Failed to parse guest kernel features.");
             goto error_out;
         }
 
@@ -659,7 +687,7 @@
     {
         if ( (supported_features[i]&required_features[i]) != required_features[i] )
         {
-            ERROR("Guest kernel does not support a required feature.\n");
+            ERROR("Guest kernel does not support a required feature.");
             goto error_out;
         }
     }
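
The new compat_check() above matches the guest kernel against the host: a PAE hypervisor (capabilities string containing "xen-3.0-x86_32p") accepts only PAE kernels, and a non-PAE hypervisor only non-PAE kernels, based on the string returned by xc_version(xc_handle, XENVER_capabilities, &xen_caps). A minimal sketch of the same test in isolation, assuming the capabilities string has already been fetched; host_is_pae() and kernel_matches_host() are invented helper names:

    #include <string.h>

    /* Reduced form of the PAE check performed by compat_check() above. */
    static int host_is_pae(const char *caps)
    {
        return strstr(caps, "xen-3.0-x86_32p") != NULL;
    }

    static int kernel_matches_host(const char *caps, int pae_kernel)
    {
        /* PAE host needs a PAE kernel; non-PAE host needs a non-PAE one. */
        return host_is_pae(caps) ? pae_kernel : !pae_kernel;
    }
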
diff -r b7802a60b09f -r c668b024b2c7 tools/libxc/xc_load_elf.c
--- a/tools/libxc/xc_load_elf.c Mon Apr 10 15:28:52 2006
+++ b/tools/libxc/xc_load_elf.c Mon Apr 10 15:36:03 2006
@@ -66,6 +66,21 @@
     if ( !IS_ELF(*ehdr) )
     {
         ERROR("Kernel image does not have an ELF header.");
+        return -EINVAL;
+    }
+
+    if (
+#if defined(__i386__)
+        (ehdr->e_ident[EI_CLASS] != ELFCLASS32) ||
+        (ehdr->e_machine != EM_386) ||
+#elif defined(__x86_64__)
+        (ehdr->e_ident[EI_CLASS] != ELFCLASS64) ||
+        (ehdr->e_machine != EM_X86_64) ||
+#endif
+        (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) ||
+        (ehdr->e_type != ET_EXEC) )
+    {
+        ERROR("Kernel not a Xen-compatible Elf image.");
         return -EINVAL;
     }
 
diff -r b7802a60b09f -r c668b024b2c7 tools/python/xen/xend/server/pciif.py
--- a/tools/python/xen/xend/server/pciif.py     Mon Apr 10 15:28:52 2006
+++ b/tools/python/xen/xend/server/pciif.py     Mon Apr 10 15:36:03 2006
@@ -115,7 +115,7 @@
             dev = PciDevice(domain, bus, slot, func)
         except Exception, e:
             raise VmError("pci: failed to locate device and "+
-                    "parse it's resources - %s"+str(e))
+                    "parse it's resources - "+str(e))
 
         if dev.driver!='pciback':
             raise VmError(("pci: PCI Backend does not own device "+ \
@@ -131,7 +131,7 @@
                     nr_ports = size, allow_access = True)
             if rc<0:
                 raise VmError(('pci: failed to configure I/O ports on device '+
-                            '%s - errno=%d')&(dev.name,rc))
+                            '%s - errno=%d')%(dev.name,rc))
             
         for (start, size) in dev.iomem:
             # Convert start/size from bytes to page frame sizes
@@ -147,7 +147,7 @@
                     allow_access = True)
             if rc<0:
                 raise VmError(('pci: failed to configure I/O memory on device '+
-                            '%s - errno=%d')&(dev.name,rc))
+                            '%s - errno=%d')%(dev.name,rc))
 
         if dev.irq>0:
             log.debug('pci: enabling irq %d'%dev.irq)
@@ -155,7 +155,7 @@
                     allow_access = True)
             if rc<0:
                 raise VmError(('pci: failed to configure irq on device '+
-                            '%s - errno=%d')&(dev.name,rc))
+                            '%s - errno=%d')%(dev.name,rc))
 
     def waitForBackend(self,devid):
         return (0, "ok - no hotplug")
diff -r b7802a60b09f -r c668b024b2c7 xen/Rules.mk
--- a/xen/Rules.mk      Mon Apr 10 15:28:52 2006
+++ b/xen/Rules.mk      Mon Apr 10 15:36:03 2006
@@ -74,7 +74,7 @@
 subdir-y := $(patsubst %,%/,$(patsubst %/,%,$(subdir-y)))
 
 # Add explicitly declared subdirectories to the object list.
-obj-y += $(patsubst %,%/built_in.o,$(subdir-y))
+obj-y += $(patsubst %/,%/built_in.o,$(subdir-y))
 
 # Add implicitly declared subdirectories (in the object list) to the
 # subdirectory list, and rewrite the object-list entry.
diff -r b7802a60b09f -r c668b024b2c7 xen/arch/ia64/xen/irq.c
--- a/xen/arch/ia64/xen/irq.c   Mon Apr 10 15:28:52 2006
+++ b/xen/arch/ia64/xen/irq.c   Mon Apr 10 15:36:03 2006
@@ -1358,25 +1358,20 @@
 int pirq_guest_unmask(struct domain *d)
 {
     irq_desc_t    *desc;
-    int            i, j, pirq;
-    u32            m;
+    int            pirq;
     shared_info_t *s = d->shared_info;
 
-    for ( i = 0; i < ARRAY_SIZE(d->pirq_mask); i++ )
+    for ( pirq = find_first_bit(d->pirq_mask, NR_PIRQS);
+          pirq < NR_PIRQS;
+          pirq = find_next_bit(d->pirq_mask, NR_PIRQS, pirq+1) )
     {
-        m = d->pirq_mask[i];
-        while ( (j = ffs(m)) != 0 )
-        {
-            m &= ~(1 << --j);
-            pirq = (i << 5) + j;
-            desc = &irq_desc[pirq];
-            spin_lock_irq(&desc->lock);
-            if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
-                 test_and_clear_bit(pirq, &d->pirq_mask) &&
-                 (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
-                desc->handler->end(pirq);
-            spin_unlock_irq(&desc->lock);
-        }
+        desc = &irq_desc[pirq];
+        spin_lock_irq(&desc->lock);
+        if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
+             test_and_clear_bit(pirq, &d->pirq_mask) &&
+             (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
+            desc->handler->end(pirq);
+        spin_unlock_irq(&desc->lock);
     }
 
     return 0;
diff -r b7802a60b09f -r c668b024b2c7 xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c    Mon Apr 10 15:28:52 2006
+++ b/xen/arch/x86/io_apic.c    Mon Apr 10 15:36:03 2006
@@ -75,6 +75,7 @@
 static struct irq_pin_list {
     int apic, pin, next;
 } irq_2_pin[PIN_MAP_SIZE];
+static int irq_2_pin_free_entry = NR_IRQS;
 
 int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
 
@@ -85,20 +86,57 @@
  */
 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
 {
-    static int first_free_entry = NR_IRQS;
     struct irq_pin_list *entry = irq_2_pin + irq;
 
-    while (entry->next)
+    while (entry->next) {
+        BUG_ON((entry->apic == apic) && (entry->pin == pin));
         entry = irq_2_pin + entry->next;
+    }
+
+    BUG_ON((entry->apic == apic) && (entry->pin == pin));
 
     if (entry->pin != -1) {
-        entry->next = first_free_entry;
+        if (irq_2_pin_free_entry >= PIN_MAP_SIZE)
+            panic("io_apic.c: whoops");
+        entry->next = irq_2_pin_free_entry;
         entry = irq_2_pin + entry->next;
-        if (++first_free_entry >= PIN_MAP_SIZE)
-            panic("io_apic.c: whoops");
+        irq_2_pin_free_entry = entry->next;
+        entry->next = 0;
     }
     entry->apic = apic;
     entry->pin = pin;
+}
+
+static void remove_pin_at_irq(unsigned int irq, int apic, int pin)
+{
+    struct irq_pin_list *entry, *prev;
+
+    for (entry = &irq_2_pin[irq]; ; entry = &irq_2_pin[entry->next]) {
+        if ((entry->apic == apic) && (entry->pin == pin))
+            break;
+        if (!entry->next)
+            BUG();
+    }
+
+    entry->pin = entry->apic = -1;
+    
+    if (entry != &irq_2_pin[irq]) {
+        /* Removed entry is not at head of list. */
+        prev = &irq_2_pin[irq];
+        while (&irq_2_pin[prev->next] != entry)
+            prev = &irq_2_pin[prev->next];
+        prev->next = entry->next;
+        entry->next = irq_2_pin_free_entry;
+        irq_2_pin_free_entry = entry - irq_2_pin;
+    } else if (entry->next != 0) {
+        /* Removed entry is at head of multi-item list. */
+        prev  = entry;
+        entry = &irq_2_pin[entry->next];
+        *prev = *entry;
+        entry->pin = entry->apic = -1;
+        entry->next = irq_2_pin_free_entry;
+        irq_2_pin_free_entry = entry - irq_2_pin;
+    }
 }
 
 /*
@@ -958,6 +996,10 @@
         irq_2_pin[i].pin = -1;
         irq_2_pin[i].next = 0;
     }
+
+    /* Initialise dynamic irq_2_pin free list. */
+    for (i = NR_IRQS; i < PIN_MAP_SIZE; i++)
+        irq_2_pin[i].next = i + 1;
 
     /*
      * The number of IO-APIC IRQ registers (== #pins):
@@ -1854,11 +1896,17 @@
     return 0;
 }
 
+#define WARN_BOGUS_WRITE(f, a...)                                       \
+    DPRINTK("\n%s: apic=%d, pin=%d, old_irq=%d, new_irq=%d\n"           \
+            "%s: old_entry=%08x, new_entry=%08x\n"                      \
+            "%s: " f, __FUNCTION__, apic, pin, old_irq, new_irq,        \
+            __FUNCTION__, *(u32 *)&old_rte, *(u32 *)&new_rte,           \
+            __FUNCTION__ , ##a )
+
 int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
 {
-    int apic, pin, irq;
-    struct IO_APIC_route_entry rte = { 0 };
-    struct irq_pin_list *entry;
+    int apic, pin, old_irq = -1, new_irq = -1;
+    struct IO_APIC_route_entry old_rte = { 0 }, new_rte = { 0 };
     unsigned long flags;
 
     if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
@@ -1870,8 +1918,9 @@
     
     pin = (reg - 0x10) >> 1;
 
-    *(u32 *)&rte = val;
-    rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+    /* Write first half from guest; second half is target info. */
+    *(u32 *)&new_rte = val;
+    new_rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
 
     /*
      * What about weird destination types?
@@ -1881,7 +1930,7 @@
      *  ExtINT: Ignore? Linux only asserts this at start of day.
      * For now, print a message and return an error. We can fix up on demand.
      */
-    if ( rte.delivery_mode > dest_LowestPrio )
+    if ( new_rte.delivery_mode > dest_LowestPrio )
     {
         printk("ERROR: Attempt to write weird IOAPIC destination mode!\n");
         printk("       APIC=%d/%d, lo-reg=%x\n", apic, pin, val);
@@ -1892,36 +1941,69 @@
      * The guest does not know physical APIC arrangement (flat vs. cluster).
      * Apply genapic conventions for this platform.
      */
-    rte.delivery_mode = INT_DELIVERY_MODE;
-    rte.dest_mode     = INT_DEST_MODE;
-
-    if ( rte.vector >= FIRST_DEVICE_VECTOR )
-    {
-        /* Is there a valid irq mapped to this vector? */
-        irq = vector_irq[rte.vector];
-        if ( !IO_APIC_IRQ(irq) )
+    new_rte.delivery_mode = INT_DELIVERY_MODE;
+    new_rte.dest_mode     = INT_DEST_MODE;
+
+    spin_lock_irqsave(&ioapic_lock, flags);
+
+    /* Read first (interesting) half of current routing entry. */
+    *(u32 *)&old_rte = io_apic_read(apic, 0x10 + 2 * pin);
+
+    /* No change to the first half of the routing entry? Bail quietly. */
+    if ( *(u32 *)&old_rte == *(u32 *)&new_rte )
+    {
+        spin_unlock_irqrestore(&ioapic_lock, flags);
+        return 0;
+    }
+
+    if ( old_rte.vector >= FIRST_DEVICE_VECTOR )
+        old_irq = vector_irq[old_rte.vector];
+    if ( new_rte.vector >= FIRST_DEVICE_VECTOR )
+        new_irq = vector_irq[new_rte.vector];
+
+    if ( (old_irq != new_irq) && (old_irq != -1) && IO_APIC_IRQ(old_irq) )
+    {
+        if ( irq_desc[IO_APIC_VECTOR(old_irq)].action )
+        {
+            WARN_BOGUS_WRITE("Attempt to remove IO-APIC pin of in-use IRQ!\n");
+            spin_unlock_irqrestore(&ioapic_lock, flags);
             return 0;
-
+        }
+
+        remove_pin_at_irq(old_irq, apic, pin);
+    }
+
+    if ( (new_irq != -1) && IO_APIC_IRQ(new_irq) )
+    {
+        if ( irq_desc[IO_APIC_VECTOR(new_irq)].action )
+        {
+            WARN_BOGUS_WRITE("Attempt to %s IO-APIC pin for in-use IRQ!\n",
+                             (old_irq != new_irq) ? "add" : "modify");
+            spin_unlock_irqrestore(&ioapic_lock, flags);
+            return 0;
+        }
+        
         /* Set the correct irq-handling type. */
-        irq_desc[IO_APIC_VECTOR(irq)].handler = rte.trigger ? 
+        irq_desc[IO_APIC_VECTOR(new_irq)].handler = new_rte.trigger ? 
             &ioapic_level_type: &ioapic_edge_type;
-
-        /* Record the pin<->irq mapping. */
-        for ( entry = &irq_2_pin[irq]; ; entry = &irq_2_pin[entry->next] )
-        {
-            if ( (entry->apic == apic) && (entry->pin == pin) )
-                break;
-            if ( !entry->next )
-            {
-                add_pin_to_irq(irq, apic, pin);
-                break;
-            }
-        }
-    }
-
-    spin_lock_irqsave(&ioapic_lock, flags);
-    io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&rte) + 0));
-    io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&rte) + 1));
+        
+        if ( old_irq != new_irq )
+            add_pin_to_irq(new_irq, apic, pin);
+
+        /* Mask iff level triggered. */
+        new_rte.mask = new_rte.trigger;
+    }
+    else if ( !new_rte.mask )
+    {
+        /* This pin leads nowhere but the guest has not masked it. */
+        WARN_BOGUS_WRITE("Installing bogus unmasked IO-APIC entry!\n");
+        new_rte.mask = 1;
+    }
+
+
+    io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&new_rte) + 0));
+    io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&new_rte) + 1));
+
     spin_unlock_irqrestore(&ioapic_lock, flags);
 
     return 0;
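
The io_apic.c changes replace the grow-only first_free_entry counter with irq_2_pin_free_entry, a free list threaded through the spare irq_2_pin[] slots, so that the new remove_pin_at_irq() can return entries when ioapic_guest_write() retargets or masks a pin. A small user-space sketch of that array-based free-list idiom; the sizes and all names below are illustrative, not the hypervisor's:

    #include <assert.h>

    #define NR_IRQS      16              /* assumed value, sketch only */
    #define PIN_MAP_SIZE (NR_IRQS * 4)   /* assumed value, sketch only */

    /* Slots below NR_IRQS are the static per-IRQ list heads; slots above
     * are chained through .next to form the free list, as the new
     * initialisation loop above does. */
    static struct { int apic, pin, next; } pin_list[PIN_MAP_SIZE];
    static int free_entry = NR_IRQS;

    static void pin_list_init(void)
    {
        int i;
        for (i = 0; i < PIN_MAP_SIZE; i++) {
            pin_list[i].apic = pin_list[i].pin = -1;
            pin_list[i].next = 0;
        }
        for (i = NR_IRQS; i < PIN_MAP_SIZE; i++)
            pin_list[i].next = i + 1;    /* chain the dynamic slots */
    }

    static int pin_list_alloc(void)
    {
        int idx = free_entry;
        assert(idx < PIN_MAP_SIZE);      /* mirrors the panic() above */
        free_entry = pin_list[idx].next;
        pin_list[idx].next = 0;
        return idx;
    }

    static void pin_list_free(int idx)
    {
        pin_list[idx].apic = pin_list[idx].pin = -1;
        pin_list[idx].next = free_entry;  /* push back onto the free list */
        free_entry = idx;
    }

Reclaiming is what the old code could not do: once first_free_entry had advanced past a slot, that slot was never reused.
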
diff -r b7802a60b09f -r c668b024b2c7 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Mon Apr 10 15:28:52 2006
+++ b/xen/arch/x86/irq.c        Mon Apr 10 15:36:03 2006
@@ -171,26 +171,20 @@
 int pirq_guest_unmask(struct domain *d)
 {
     irq_desc_t    *desc;
-    unsigned int   i, j, pirq;
-    u32            m;
+    unsigned int   pirq;
     shared_info_t *s = d->shared_info;
 
-    for ( i = 0; i < ARRAY_SIZE(d->pirq_mask); i++ )
-    {
-        m = d->pirq_mask[i];
-        while ( m != 0 )
-        {
-            j = find_first_set_bit(m);
-            m &= ~(1 << j);
-            pirq = (i << 5) + j;
-            desc = &irq_desc[irq_to_vector(pirq)];
-            spin_lock_irq(&desc->lock);
-            if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
-                 test_and_clear_bit(pirq, &d->pirq_mask) &&
-                 (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
-                desc->handler->end(irq_to_vector(pirq));
-            spin_unlock_irq(&desc->lock);
-        }
+    for ( pirq = find_first_bit(d->pirq_mask, NR_PIRQS);
+          pirq < NR_PIRQS;
+          pirq = find_next_bit(d->pirq_mask, NR_PIRQS, pirq+1) )
+    {
+        desc = &irq_desc[irq_to_vector(pirq)];
+        spin_lock_irq(&desc->lock);
+        if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
+             test_and_clear_bit(pirq, &d->pirq_mask) &&
+             (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
+            desc->handler->end(irq_to_vector(pirq));
+        spin_unlock_irq(&desc->lock);
     }
 
     return 0;
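
Both pirq_guest_unmask() rewrites above (ia64 and x86) drop the hand-rolled word-by-word ffs() scan of d->pirq_mask in favour of iterating the set bits with find_first_bit()/find_next_bit(). A minimal user-space sketch of that loop shape, with a stand-in bit scanner since the Xen bitops are not available outside the tree; NR_PIRQS and every name below are assumptions for the example:

    #include <stdio.h>

    #define NR_PIRQS 64   /* assumed size, sketch only */
    static unsigned long pirq_mask[(NR_PIRQS + 8 * sizeof(unsigned long) - 1) /
                                   (8 * sizeof(unsigned long))];

    /* Stand-in for find_first_bit()/find_next_bit(): index of the first set
     * bit at or after 'start', or 'size' if none remains. */
    static unsigned int next_set_bit(const unsigned long *map,
                                     unsigned int size, unsigned int start)
    {
        unsigned int i, bpl = 8 * sizeof(unsigned long);
        for (i = start; i < size; i++)
            if (map[i / bpl] & (1UL << (i % bpl)))
                return i;
        return size;
    }

    int main(void)
    {
        unsigned int pirq;

        pirq_mask[0] = (1UL << 3) | (1UL << 17);   /* two pending pirqs */

        /* Same loop shape as the new pirq_guest_unmask(). */
        for (pirq = next_set_bit(pirq_mask, NR_PIRQS, 0);
             pirq < NR_PIRQS;
             pirq = next_set_bit(pirq_mask, NR_PIRQS, pirq + 1))
            printf("would end pirq %u\n", pirq);

        return 0;
    }
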
diff -r b7802a60b09f -r c668b024b2c7 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c   Mon Apr 10 15:28:52 2006
+++ b/xen/common/sched_sedf.c   Mon Apr 10 15:36:03 2006
@@ -15,34 +15,23 @@
 
 /*verbosity settings*/
 #define SEDFLEVEL 0
-#define PRINT(_f, _a...)  \
-    if ((_f)<=SEDFLEVEL) printk(_a );
+#define PRINT(_f, _a...)                        \
+    do {                                        \
+        if ( (_f) <= SEDFLEVEL )                \
+            printk(_a );                        \
+    } while ( 0 )
 
 #ifndef NDEBUG
 #define SEDF_STATS
-#define CHECK(_p) if ( !(_p) ) \
- { printk("Check '%s' failed, line %d, file %s\n", #_p , __LINE__,\
- __FILE__);}
+#define CHECK(_p)                                           \
+    do {                                                    \
+        if ( !(_p) )                                        \
+            printk("Check '%s' failed, line %d, file %s\n", \
+                   #_p , __LINE__, __FILE__);               \
+    } while ( 0 )
 #else
 #define CHECK(_p) ((void)0)
 #endif
-
-/*various ways of unblocking domains*/
-#define UNBLOCK_ISOCHRONOUS_EDF 1
-#define UNBLOCK_EDF 2
-#define UNBLOCK_ATROPOS 3
-#define UNBLOCK_SHORT_RESUME 4
-#define UNBLOCK_BURST 5
-#define UNBLOCK_EXTRA_SUPPORT 6
-#define UNBLOCK UNBLOCK_EXTRA_SUPPORT
-
-/*various ways of treating extra-time*/
-#define EXTRA_OFF 1
-#define EXTRA_ROUNDR 2
-#define EXTRA_SLICE_WEIGHT 3
-#define EXTRA_BLOCK_WEIGHT 4
-
-#define EXTRA EXTRA_BLOCK_WEIGHT
 
 #define EXTRA_NONE (0)
 #define EXTRA_AWARE (1)
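
The PRINT() and CHECK() macros above are rewrapped in do { ... } while ( 0 ) so that a macro expanding to an if statement behaves as one single statement at the call site; with the old bare-if form, placing the macro in the body of an if/else could silently rebind the else. A small illustration of the hazard, with invented macro names (the SEDF macros themselves are unchanged in meaning):

    #include <stdio.h>

    static int verbose = 0;

    /* Old style: a bare if, as PRINT() used to be. */
    #define LOG_BAD(msg)  if (verbose) printf("%s\n", msg)
    /* New style: the do/while(0) wrapper used by the patch. */
    #define LOG_GOOD(msg) do { if (verbose) printf("%s\n", msg); } while (0)

    int main(void)
    {
        int ok = 0;

        /* With LOG_BAD here, the else would pair with the macro's hidden
         * if: "recovering" would be skipped when ok is 0 and printed when
         * ok is non-zero but verbose is 0. LOG_GOOD keeps the intended
         * pairing. */
        if (ok)
            LOG_GOOD("all fine");
        else
            printf("recovering\n");

        return 0;
    }
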
@@ -68,8 +57,8 @@
 struct sedf_dom_info {
     struct domain  *domain;
 };
-struct sedf_vcpu_info
-{
+
+struct sedf_vcpu_info {
     struct vcpu *vcpu;
     struct list_head list;
     struct list_head extralist[2];
@@ -85,10 +74,10 @@
     s_time_t  latency;
  
     /*status of domain*/
-    int   status;
+    int       status;
     /*weights for "Scheduling for beginners/ lazy/ etc." ;)*/
-    short   weight;
-    short                   extraweight;
+    short     weight;
+    short     extraweight;
     /*Bookkeeping*/
     s_time_t  deadl_abs;
     s_time_t  sched_start_abs;
@@ -123,28 +112,29 @@
     s_time_t         current_slice_expires;
 };
 
-#define EDOM_INFO(d)  ((struct sedf_vcpu_info *)((d)->sched_priv))
-#define CPU_INFO(cpu) ((struct sedf_cpu_info *)schedule_data[cpu].sched_priv)
-#define LIST(d)   (&EDOM_INFO(d)->list)
-#define EXTRALIST(d,i)  (&(EDOM_INFO(d)->extralist[i]))
-#define RUNQ(cpu)     (&CPU_INFO(cpu)->runnableq)
+#define EDOM_INFO(d)   ((struct sedf_vcpu_info *)((d)->sched_priv))
+#define CPU_INFO(cpu)  ((struct sedf_cpu_info *)schedule_data[cpu].sched_priv)
+#define LIST(d)        (&EDOM_INFO(d)->list)
+#define EXTRALIST(d,i) (&(EDOM_INFO(d)->extralist[i]))
+#define RUNQ(cpu)      (&CPU_INFO(cpu)->runnableq)
 #define WAITQ(cpu)     (&CPU_INFO(cpu)->waitq)
-#define EXTRAQ(cpu,i)    (&(CPU_INFO(cpu)->extraq[i]))
+#define EXTRAQ(cpu,i)  (&(CPU_INFO(cpu)->extraq[i]))
 #define IDLETASK(cpu)  ((struct vcpu *)schedule_data[cpu].idle)
 
 #define PERIOD_BEGIN(inf) ((inf)->deadl_abs - (inf)->period)
 
-#define MIN(x,y) (((x)<(y))?(x):(y))
+#define MIN(x,y)    (((x)<(y))?(x):(y))
 #define DIV_UP(x,y) (((x) + (y) - 1) / y)
 
-#define extra_runs(inf) ((inf->status) & 6)
+#define extra_runs(inf)      ((inf->status) & 6)
 #define extra_get_cur_q(inf) (((inf->status & 6) >> 1)-1)
-#define sedf_runnable(edom) (!(EDOM_INFO(edom)->status & SEDF_ASLEEP))
+#define sedf_runnable(edom)  (!(EDOM_INFO(edom)->status & SEDF_ASLEEP))
 
 
 static void sedf_dump_cpu_state(int i);
 
-static inline int extraq_on(struct vcpu *d, int i) {
+static inline int extraq_on(struct vcpu *d, int i)
+{
     return ((EXTRALIST(d,i)->next != NULL) &&
             (EXTRALIST(d,i)->next != EXTRALIST(d,i)));
 }
@@ -165,8 +155,8 @@
 {
     struct list_head *list = EXTRALIST(d,i);
     ASSERT(extraq_on(d,i));
-    PRINT(3, "Removing domain %i.%i from L%i extraq\n", d->domain->domain_id,
-          d->vcpu_id, i); 
+    PRINT(3, "Removing domain %i.%i from L%i extraq\n",
+          d->domain->domain_id, d->vcpu_id, i); 
     list_del(list);
     list->next = NULL;
     ASSERT(!extraq_on(d, i));
@@ -178,94 +168,96 @@
    each entry, in order to avoid overflow. The algorithm works by simply
    charging each domain that recieved extratime with an inverse of its weight.
  */ 
-static inline void extraq_add_sort_update(struct vcpu *d, int i, int sub) {
+static inline void extraq_add_sort_update(struct vcpu *d, int i, int sub)
+{
     struct list_head      *cur;
     struct sedf_vcpu_info *curinf;
  
     ASSERT(!extraq_on(d,i));
+
     PRINT(3, "Adding domain %i.%i (score= %i, short_pen= %"PRIi64")"
           " to L%i extraq\n",
           d->domain->domain_id, d->vcpu_id, EDOM_INFO(d)->score[i],
           EDOM_INFO(d)->short_block_lost_tot, i); 
-    /*iterate through all elements to find our "hole" and on our way
-      update all the other scores*/
-    list_for_each(cur,EXTRAQ(d->processor,i)){
+
+    /*
+     * Iterate through all elements to find our "hole" and on our way
+     * update all the other scores.
+     */
+    list_for_each ( cur, EXTRAQ(d->processor, i) )
+    {
         curinf = list_entry(cur,struct sedf_vcpu_info,extralist[i]);
         curinf->score[i] -= sub;
-        if (EDOM_INFO(d)->score[i] < curinf->score[i])
+        if ( EDOM_INFO(d)->score[i] < curinf->score[i] )
             break;
-        else
-            PRINT(4,"\tbehind domain %i.%i (score= %i)\n",
-                  curinf->vcpu->domain->domain_id,
-                  curinf->vcpu->vcpu_id, curinf->score[i]);
-    }
-    /*cur now contains the element, before which we'll enqueue*/
+        PRINT(4,"\tbehind domain %i.%i (score= %i)\n",
+              curinf->vcpu->domain->domain_id,
+              curinf->vcpu->vcpu_id, curinf->score[i]);
+    }
+
+    /* cur now contains the element, before which we'll enqueue. */
     PRINT(3, "\tlist_add to %p\n", cur->prev);
     list_add(EXTRALIST(d,i),cur->prev);
  
-    /*continue updating the extraq*/
-    if ((cur != EXTRAQ(d->processor,i)) && sub)
-        for (cur = cur->next; cur != EXTRAQ(d->processor,i);
-             cur = cur-> next) {
-            curinf = list_entry(cur,struct sedf_vcpu_info,
-                                extralist[i]);
+    /* Continue updating the extraq. */
+    if ( (cur != EXTRAQ(d->processor,i)) && sub )
+    {
+        for ( cur = cur->next; cur != EXTRAQ(d->processor,i); cur = cur->next )
+        {
+            curinf = list_entry(cur,struct sedf_vcpu_info, extralist[i]);
             curinf->score[i] -= sub;
             PRINT(4, "\tupdating domain %i.%i (score= %u)\n",
                   curinf->vcpu->domain->domain_id, 
                   curinf->vcpu->vcpu_id, curinf->score[i]);
         }
+    }
+
     ASSERT(extraq_on(d,i));
 }
-static inline void extraq_check(struct vcpu *d) {
-    if (extraq_on(d, EXTRA_UTIL_Q)) {
-        PRINT(2,"Dom %i.%i is on L1 extraQ\n",d->domain->domain_id, d->vcpu_id);
-        if (!(EDOM_INFO(d)->status & EXTRA_AWARE) &&
-            !extra_runs(EDOM_INFO(d))) {
+static inline void extraq_check(struct vcpu *d)
+{
+    if ( extraq_on(d, EXTRA_UTIL_Q) )
+    {
+        PRINT(2,"Dom %i.%i is on L1 extraQ\n",
+              d->domain->domain_id, d->vcpu_id);
+
+        if ( !(EDOM_INFO(d)->status & EXTRA_AWARE) &&
+             !extra_runs(EDOM_INFO(d)) )
+        {
             extraq_del(d, EXTRA_UTIL_Q);
             PRINT(2,"Removed dom %i.%i from L1 extraQ\n",
                   d->domain->domain_id, d->vcpu_id);
         }
-    } else {
-        PRINT(2,"Dom %i.%i is NOT on L1 extraQ\n",d->domain->domain_id,
+    }
+    else
+    {
+        PRINT(2, "Dom %i.%i is NOT on L1 extraQ\n",
+              d->domain->domain_id,
               d->vcpu_id);
-        if ((EDOM_INFO(d)->status & EXTRA_AWARE) && sedf_runnable(d))
-        {
-#if (EXTRA == EXTRA_ROUNDR)
-            extraq_add_tail(d, EXTRA_UTIL_Q);
-#elif (EXTRA == EXTRA_SLICE_WEIGHT || \
-          EXTRA == EXTRA_BLOCK_WEIGHT)
+
+        if ( (EDOM_INFO(d)->status & EXTRA_AWARE) && sedf_runnable(d) )
+        {
             extraq_add_sort_update(d, EXTRA_UTIL_Q, 0);
-#elif
-            ;
-#endif
-            PRINT(2,"Added dom %i.%i to L1 extraQ\n",d->domain->domain_id,
-                  d->vcpu_id);
-        }
-    }
-}
-
-static inline void extraq_check_add_unblocked(struct vcpu *d, 
-                                              int priority) {
+            PRINT(2,"Added dom %i.%i to L1 extraQ\n",
+                  d->domain->domain_id, d->vcpu_id);
+        }
+    }
+}
+
+static inline void extraq_check_add_unblocked(struct vcpu *d, int priority)
+{
     struct sedf_vcpu_info *inf = EDOM_INFO(d);
-    if (inf->status & EXTRA_AWARE) 
-#if (EXTRA == EXTRA_ROUNDR)
-        if (priority)
-            extraq_add_head(d,EXTRA_UTIL_Q);
-        else
-            extraq_add_tail(d,EXTRA_UTIL_Q);
-#elif (EXTRA == EXTRA_SLICE_WEIGHT \
-     || EXTRA == EXTRA_BLOCK_WEIGHT)
-    /*put in on the weighted extraq, 
-    without updating any scores*/
-    extraq_add_sort_update(d, EXTRA_UTIL_Q, 0);
-#else
-    ;
-#endif
-}
-
-static inline int __task_on_queue(struct vcpu *d) {
+
+    if ( inf->status & EXTRA_AWARE )
+        /* Put on the weighted extraq without updating any scores. */
+        extraq_add_sort_update(d, EXTRA_UTIL_Q, 0);
+}
+
+static inline int __task_on_queue(struct vcpu *d)
+{
     return (((LIST(d))->next != NULL) && (LIST(d)->next != LIST(d)));
 }
+
 static inline void __del_from_queue(struct vcpu *d)
 {
     struct list_head *list = LIST(d);
@@ -279,42 +271,47 @@
 
 typedef int(*list_comparer)(struct list_head* el1, struct list_head* el2);
 
-static inline void list_insert_sort(struct list_head *list,
-                                    struct list_head *element, list_comparer comp) {
+static inline void list_insert_sort(
+    struct list_head *list, struct list_head *element, list_comparer comp)
+{
     struct list_head     *cur;
-    /*iterate through all elements to find our "hole"*/
-    list_for_each(cur,list){
-        if (comp(element, cur) < 0)
+
+    /* Iterate through all elements to find our "hole". */
+    list_for_each( cur, list )
+        if ( comp(element, cur) < 0 )
             break;
-    }
-    /*cur now contains the element, before which we'll enqueue*/
+
+    /* cur now contains the element, before which we'll enqueue. */
     PRINT(3,"\tlist_add to %p\n",cur->prev);
     list_add(element, cur->prev);
-}  
+}
+
 #define DOMAIN_COMPARER(name, field, comp1, comp2)          \
 int name##_comp(struct list_head* el1, struct list_head* el2) \
 {                                                           \
- struct sedf_vcpu_info *d1, *d2;                     \
- d1 = list_entry(el1,struct sedf_vcpu_info, field);  \
- d2 = list_entry(el2,struct sedf_vcpu_info, field);  \
- if ((comp1) == (comp2))                             \
-  return 0;                                   \
- if ((comp1) < (comp2))                              \
-  return -1;                                  \
- else                                                \
-  return 1;                                   \
-}
+    struct sedf_vcpu_info *d1, *d2;                     \
+    d1 = list_entry(el1,struct sedf_vcpu_info, field);  \
+    d2 = list_entry(el2,struct sedf_vcpu_info, field);  \
+    if ( (comp1) == (comp2) )                             \
+        return 0;                                   \
+    if ( (comp1) < (comp2) )                              \
+        return -1;                                  \
+    else                                                \
+        return 1;                                   \
+}
+
 /* adds a domain to the queue of processes which wait for the beginning of the
    next period; this list is therefore sortet by this time, which is simply
    absol. deadline - period
  */ 
-DOMAIN_COMPARER(waitq, list, PERIOD_BEGIN(d1), PERIOD_BEGIN(d2))
-    static inline void __add_to_waitqueue_sort(struct vcpu *d) {
-    ASSERT(!__task_on_queue(d));
+DOMAIN_COMPARER(waitq, list, PERIOD_BEGIN(d1), PERIOD_BEGIN(d2));
+static inline void __add_to_waitqueue_sort(struct vcpu *v)
+{
+    ASSERT(!__task_on_queue(v));
     PRINT(3,"Adding domain %i.%i (bop= %"PRIu64") to waitq\n",
-          d->domain->domain_id, d->vcpu_id, PERIOD_BEGIN(EDOM_INFO(d)));
-    list_insert_sort(WAITQ(d->processor), LIST(d), waitq_comp);
-    ASSERT(__task_on_queue(d));
+          v->domain->domain_id, v->vcpu_id, PERIOD_BEGIN(EDOM_INFO(v)));
+    list_insert_sort(WAITQ(v->processor), LIST(v), waitq_comp);
+    ASSERT(__task_on_queue(v));
 }
 
 /* adds a domain to the queue of processes which have started their current
@@ -322,60 +319,62 @@
    on this list is running on the processor, if the list is empty the idle
    task will run. As we are implementing EDF, this list is sorted by deadlines.
  */ 
-DOMAIN_COMPARER(runq, list, d1->deadl_abs, d2->deadl_abs)
-    static inline void __add_to_runqueue_sort(struct vcpu *d) {
+DOMAIN_COMPARER(runq, list, d1->deadl_abs, d2->deadl_abs);
+static inline void __add_to_runqueue_sort(struct vcpu *v)
+{
     PRINT(3,"Adding domain %i.%i (deadl= %"PRIu64") to runq\n",
-          d->domain->domain_id, d->vcpu_id, EDOM_INFO(d)->deadl_abs);
-    list_insert_sort(RUNQ(d->processor), LIST(d), runq_comp);
+          v->domain->domain_id, v->vcpu_id, EDOM_INFO(v)->deadl_abs);
+    list_insert_sort(RUNQ(v->processor), LIST(v), runq_comp);
 }
 
 
 /* Allocates memory for per domain private scheduling data*/
-static int sedf_alloc_task(struct vcpu *d)
+static int sedf_alloc_task(struct vcpu *v)
 {
     PRINT(2, "sedf_alloc_task was called, domain-id %i.%i\n",
-          d->domain->domain_id, d->vcpu_id);
-
-    if ( d->domain->sched_priv == NULL )
-    {
-        d->domain->sched_priv = xmalloc(struct sedf_dom_info);
-        if ( d->domain->sched_priv == NULL )
+          v->domain->domain_id, v->vcpu_id);
+
+    if ( v->domain->sched_priv == NULL )
+    {
+        v->domain->sched_priv = xmalloc(struct sedf_dom_info);
+        if ( v->domain->sched_priv == NULL )
             return -1;
-        memset(d->domain->sched_priv, 0, sizeof(struct sedf_dom_info));
-    }
-
-    if ( (d->sched_priv = xmalloc(struct sedf_vcpu_info)) == NULL )
+        memset(v->domain->sched_priv, 0, sizeof(struct sedf_dom_info));
+    }
+
+    if ( (v->sched_priv = xmalloc(struct sedf_vcpu_info)) == NULL )
         return -1;
 
-    memset(d->sched_priv, 0, sizeof(struct sedf_vcpu_info));
+    memset(v->sched_priv, 0, sizeof(struct sedf_vcpu_info));
 
     return 0;
 }
 
 
 /* Setup the sedf_dom_info */
-static void sedf_add_task(struct vcpu *d)
-{
-    struct sedf_vcpu_info *inf = EDOM_INFO(d);
-    inf->vcpu = d;
- 
-    PRINT(2,"sedf_add_task was called, domain-id %i.%i\n",d->domain->domain_id,
-          d->vcpu_id);
+static void sedf_add_task(struct vcpu *v)
+{
+    struct sedf_vcpu_info *inf = EDOM_INFO(v);
+
+    inf->vcpu = v;
+ 
+    PRINT(2,"sedf_add_task was called, domain-id %i.%i\n",
+          v->domain->domain_id, v->vcpu_id);
 
     /* Allocate per-CPU context if this is the first domain to be added. */
-    if ( unlikely(schedule_data[d->processor].sched_priv == NULL) )
-    {
-        schedule_data[d->processor].sched_priv = 
+    if ( unlikely(schedule_data[v->processor].sched_priv == NULL) )
+    {
+        schedule_data[v->processor].sched_priv = 
             xmalloc(struct sedf_cpu_info);
-        BUG_ON(schedule_data[d->processor].sched_priv == NULL);
-        memset(CPU_INFO(d->processor), 0, sizeof(*CPU_INFO(d->processor)));
-        INIT_LIST_HEAD(WAITQ(d->processor));
-        INIT_LIST_HEAD(RUNQ(d->processor));
-        INIT_LIST_HEAD(EXTRAQ(d->processor,EXTRA_PEN_Q));
-        INIT_LIST_HEAD(EXTRAQ(d->processor,EXTRA_UTIL_Q));
+        BUG_ON(schedule_data[v->processor].sched_priv == NULL);
+        memset(CPU_INFO(v->processor), 0, sizeof(*CPU_INFO(v->processor)));
+        INIT_LIST_HEAD(WAITQ(v->processor));
+        INIT_LIST_HEAD(RUNQ(v->processor));
+        INIT_LIST_HEAD(EXTRAQ(v->processor,EXTRA_PEN_Q));
+        INIT_LIST_HEAD(EXTRAQ(v->processor,EXTRA_UTIL_Q));
     }
        
-    if ( d->domain->domain_id == 0 )
+    if ( v->domain->domain_id == 0 )
     {
         /*set dom0 to something useful to boot the machine*/
         inf->period    = MILLISECS(20);
@@ -400,14 +399,14 @@
     INIT_LIST_HEAD(&(inf->extralist[EXTRA_PEN_Q]));
     INIT_LIST_HEAD(&(inf->extralist[EXTRA_UTIL_Q]));
  
-    if ( !is_idle_vcpu(d) )
-    {
-        extraq_check(d);
+    if ( !is_idle_vcpu(v) )
+    {
+        extraq_check(v);
     }
     else
     {
-        EDOM_INFO(d)->deadl_abs = 0;
-        EDOM_INFO(d)->status &= ~SEDF_ASLEEP;
+        EDOM_INFO(v)->deadl_abs = 0;
+        EDOM_INFO(v)->status &= ~SEDF_ASLEEP;
     }
 }
 
@@ -418,17 +417,11 @@
 
     PRINT(2,"sedf_free_task was called, domain-id %i\n",d->domain_id);
 
-    ASSERT(d->sched_priv != NULL);
     xfree(d->sched_priv);
  
     for ( i = 0; i < MAX_VIRT_CPUS; i++ )
-    {
         if ( d->vcpu[i] )
-        {
-            ASSERT(d->vcpu[i]->sched_priv != NULL);
             xfree(d->vcpu[i]->sched_priv);
-        }
-    }
 }
 
 /*
@@ -438,64 +431,60 @@
 static void desched_edf_dom(s_time_t now, struct vcpu* d)
 {
     struct sedf_vcpu_info* inf = EDOM_INFO(d);
-    /*current domain is running in real time mode*/
- 
+
+    /* Current domain is running in real time mode. */
     ASSERT(__task_on_queue(d));
-    /*update the domains cputime*/
+
+    /* Update the domain's cputime. */
     inf->cputime += now - inf->sched_start_abs;
 
-    /*scheduling decisions, which don't remove the running domain
-      from the runq*/
+    /*
+     * Scheduling decisions which don't remove the running domain from the
+     * runq. 
+     */
     if ( (inf->cputime < inf->slice) && sedf_runnable(d) )
         return;
   
     __del_from_queue(d);
   
-    /*manage bookkeeping (i.e. calculate next deadline,
-      memorize overun-time of slice) of finished domains*/
+    /*
+     * Manage bookkeeping (i.e. calculate next deadline, memorise
+     * overrun-time of slice) of finished domains.
+     */
     if ( inf->cputime >= inf->slice )
     {
         inf->cputime -= inf->slice;
   
         if ( inf->period < inf->period_orig )
         {
-            /*this domain runs in latency scaling or burst mode*/
-#if (UNBLOCK == UNBLOCK_BURST)
-            /*if we are runnig in burst scaling wait for two periods
-              before scaling periods up again*/ 
-            if ( (now - inf->unblock_abs) >= (2 * inf->period) )
-#endif
+            /* This domain runs in latency scaling or burst mode. */
+            inf->period *= 2;
+            inf->slice  *= 2;
+            if ( (inf->period > inf->period_orig) ||
+                 (inf->slice > inf->slice_orig) )
             {
-                inf->period *= 2; inf->slice *= 2;
-                if ( (inf->period > inf->period_orig) ||
-                     (inf->slice > inf->slice_orig) )
-                {
-                    /*reset slice & period*/
-                    inf->period = inf->period_orig;
-                    inf->slice = inf->slice_orig;
-                }
+                /* Reset slice and period. */
+                inf->period = inf->period_orig;
+                inf->slice = inf->slice_orig;
             }
         }
-        /*set next deadline*/
+
+        /* Set next deadline. */
         inf->deadl_abs += inf->period;
     }
  
-    /*add a runnable domain to the waitqueue*/
+    /* Add a runnable domain to the waitqueue. */
     if ( sedf_runnable(d) )
     {
         __add_to_waitqueue_sort(d);
     }
     else
     {
-        /*we have a blocked realtime task -> remove it from exqs too*/
-#if (EXTRA > EXTRA_OFF)
-#if (EXTRA == EXTRA_BLOCK_WEIGHT)
+        /* We have a blocked realtime task -> remove it from exqs too. */
         if ( extraq_on(d, EXTRA_PEN_Q) )
             extraq_del(d, EXTRA_PEN_Q);
-#endif
         if ( extraq_on(d, EXTRA_UTIL_Q) )
             extraq_del(d, EXTRA_UTIL_Q);
-#endif
     }
 
     ASSERT(EQ(sedf_runnable(d), __task_on_queue(d)));
@@ -513,58 +502,57 @@
  
     PRINT(3,"Updating waitq..\n");
 
-    /*check for the first elements of the waitqueue, whether their
-      next period has already started*/
-    list_for_each_safe(cur, tmp, waitq) {
+    /*
+     * Check for the first elements of the waitqueue, whether their
+     * next period has already started.
+     */
+    list_for_each_safe ( cur, tmp, waitq )
+    {
         curinf = list_entry(cur, struct sedf_vcpu_info, list);
         PRINT(4,"\tLooking @ dom %i.%i\n",
               curinf->vcpu->domain->domain_id, curinf->vcpu->vcpu_id);
-        if ( PERIOD_BEGIN(curinf) <= now )
-        {
-            __del_from_queue(curinf->vcpu);
-            __add_to_runqueue_sort(curinf->vcpu);
-        }
-        else
+        if ( PERIOD_BEGIN(curinf) > now )
             break;
+        __del_from_queue(curinf->vcpu);
+        __add_to_runqueue_sort(curinf->vcpu);
     }
  
     PRINT(3,"Updating runq..\n");
 
-    /*process the runq, find domains that are on
-      the runqueue which shouldn't be there*/
-    list_for_each_safe(cur, tmp, runq) {
+    /* Process the runq, find domains that are on the runq that shouldn't. */
+    list_for_each_safe ( cur, tmp, runq )
+    {
         curinf = list_entry(cur,struct sedf_vcpu_info,list);
         PRINT(4,"\tLooking @ dom %i.%i\n",
               curinf->vcpu->domain->domain_id, curinf->vcpu->vcpu_id);
 
         if ( unlikely(curinf->slice == 0) )
         {
-            /*ignore domains with empty slice*/
+            /* Ignore domains with empty slice. */
             PRINT(4,"\tUpdating zero-slice domain %i.%i\n",
                   curinf->vcpu->domain->domain_id,
                   curinf->vcpu->vcpu_id);
             __del_from_queue(curinf->vcpu);
 
-            /*move them to their next period*/
+            /* Move them to their next period. */
             curinf->deadl_abs += curinf->period;
-            /*ensure that the start of the next period is in the future*/
+
+            /* Ensure that the start of the next period is in the future. */
             if ( unlikely(PERIOD_BEGIN(curinf) < now) )
-            {
                 curinf->deadl_abs += 
                     (DIV_UP(now - PERIOD_BEGIN(curinf),
-                           curinf->period)) * curinf->period;
-            }
-            /*and put them back into the queue*/
+                            curinf->period)) * curinf->period;
+
+            /* Put them back into the queue. */
             __add_to_waitqueue_sort(curinf->vcpu);
-            continue;
-        }
-
-        if ( unlikely((curinf->deadl_abs < now) ||
-                      (curinf->cputime > curinf->slice)) )
-        {
-            /*we missed the deadline or the slice was
-              already finished... might hapen because
-              of dom_adj.*/
+        }
+        else if ( unlikely((curinf->deadl_abs < now) ||
+                           (curinf->cputime > curinf->slice)) )
+        {
+            /*
+             * We missed the deadline or the slice was already finished.
+             * Might hapen because of dom_adj.
+             */
             PRINT(4,"\tDomain %i.%i exceeded it's deadline/"
                   "slice (%"PRIu64" / %"PRIu64") now: %"PRIu64
                   " cputime: %"PRIu64"\n",
@@ -573,20 +561,23 @@
                   curinf->deadl_abs, curinf->slice, now,
                   curinf->cputime);
             __del_from_queue(curinf->vcpu);
-            /*common case: we miss one period!*/
+
+            /* Common case: we miss one period. */
             curinf->deadl_abs += curinf->period;
    
-            /*if we are still behind: modulo arithmetic,
-              force deadline to be in future and
-              aligned to period borders!*/
-            if (unlikely(curinf->deadl_abs < now))
+            /*
+             * If we are still behind: modulo arithmetic, force deadline
+             * to be in future and aligned to period borders.
+             */
+            if ( unlikely(curinf->deadl_abs < now) )
                 curinf->deadl_abs += 
                     DIV_UP(now - curinf->deadl_abs,
                            curinf->period) * curinf->period;
             ASSERT(curinf->deadl_abs >= now);
-            /*give a fresh slice*/
+
+            /* Give a fresh slice. */
             curinf->cputime = 0;
-            if (PERIOD_BEGIN(curinf) > now)
+            if ( PERIOD_BEGIN(curinf) > now )
                 __add_to_waitqueue_sort(curinf->vcpu);
             else
                 __add_to_runqueue_sort(curinf->vcpu);
@@ -594,43 +585,36 @@
         else
             break;
     }
+
     PRINT(3,"done updating the queues\n");
 }
 
 
-#if (EXTRA > EXTRA_OFF)
 /* removes a domain from the head of the according extraQ and
    requeues it at a specified position:
      round-robin extratime: end of extraQ
      weighted ext.: insert in sorted list by score
    if the domain is blocked / has regained its short-block-loss
    time it is not put on any queue */
-static void desched_extra_dom(s_time_t now, struct vcpu* d)
+static void desched_extra_dom(s_time_t now, struct vcpu *d)
 {
     struct sedf_vcpu_info *inf = EDOM_INFO(d);
     int i = extra_get_cur_q(inf);
- 
-#if (EXTRA == EXTRA_SLICE_WEIGHT || EXTRA == EXTRA_BLOCK_WEIGHT)
-    unsigned long         oldscore;
-#endif
+    unsigned long oldscore;
+
     ASSERT(extraq_on(d, i));
-    /*unset all running flags*/
+
+    /* Unset all running flags. */
     inf->status  &= ~(EXTRA_RUN_PEN | EXTRA_RUN_UTIL);
-    /*fresh slice for the next run*/
+    /* Fresh slice for the next run. */
     inf->cputime = 0;
-    /*accumulate total extratime*/
+    /* Accumulate total extratime. */
     inf->extra_time_tot += now - inf->sched_start_abs;
-    /*remove extradomain from head of the queue*/
+    /* Remove extradomain from head of the queue. */
     extraq_del(d, i);
 
-#if (EXTRA == EXTRA_ROUNDR)
-    if ( sedf_runnable(d) && (inf->status & EXTRA_AWARE) )
-        /*add to the tail if it is runnable => round-robin*/
-        extraq_add_tail(d, EXTRA_UTIL_Q);
-#elif (EXTRA == EXTRA_SLICE_WEIGHT || EXTRA == EXTRA_BLOCK_WEIGHT)
-    /*update the score*/
+    /* Update the score. */
     oldscore = inf->score[i];
-#if (EXTRA == EXTRA_BLOCK_WEIGHT)
     if ( i == EXTRA_PEN_Q )
     {
         /*domain was running in L0 extraq*/
@@ -640,7 +624,8 @@
         PRINT(3,"Domain %i.%i: Short_block_loss: %"PRIi64"\n", 
               inf->vcpu->domain->domain_id, inf->vcpu->vcpu_id,
               inf->short_block_lost_tot);
-        if (inf->short_block_lost_tot <= 0) {
+        if ( inf->short_block_lost_tot <= 0 )
+        {
             PRINT(4,"Domain %i.%i compensated short block loss!\n",
                   inf->vcpu->domain->domain_id, inf->vcpu->vcpu_id);
             /*we have (over-)compensated our block penalty*/
@@ -649,6 +634,7 @@
             inf->status &= ~EXTRA_WANT_PEN_Q;
             goto check_extra_queues;
         }
+
         /*we have to go again for another try in the block-extraq,
           the score is not used incrementally here, as this is
           already done by recalculating the block_lost*/
@@ -657,7 +643,6 @@
         oldscore = 0;
     }
     else
-#endif
     {
         /*domain was running in L1 extraq => score is inverse of
           utilization and is used somewhat incrementally!*/
@@ -684,7 +669,6 @@
     {
         /*remove this blocked domain from the waitq!*/
         __del_from_queue(d);
-#if (EXTRA == EXTRA_BLOCK_WEIGHT)
         /*make sure that we remove a blocked domain from the other
           extraq too*/
         if ( i == EXTRA_PEN_Q )
@@ -697,14 +681,12 @@
             if ( extraq_on(d, EXTRA_PEN_Q) )
                 extraq_del(d, EXTRA_PEN_Q);
         }
-#endif
-    }
-#endif
+    }
+
     ASSERT(EQ(sedf_runnable(d), __task_on_queue(d)));
     ASSERT(IMPLY(extraq_on(d, EXTRA_UTIL_Q) || extraq_on(d, EXTRA_PEN_Q), 
                  sedf_runnable(d)));
 }
-#endif
 
 
 static struct task_slice sedf_do_extra_schedule(
@@ -718,7 +700,6 @@
     if ( end_xt - now < EXTRA_QUANTUM )
         goto return_idle;
 
-#if (EXTRA == EXTRA_BLOCK_WEIGHT)
     if ( !list_empty(extraq[EXTRA_PEN_Q]) )
     {
         /*we still have elements on the level 0 extraq 
@@ -733,7 +714,6 @@
 #endif
     }
     else
-#endif
     {
         if ( !list_empty(extraq[EXTRA_UTIL_Q]) )
         {
@@ -772,11 +752,9 @@
     int                   cpu      = smp_processor_id();
     struct list_head     *runq     = RUNQ(cpu);
     struct list_head     *waitq    = WAITQ(cpu);
-#if (EXTRA > EXTRA_OFF)
     struct sedf_vcpu_info *inf     = EDOM_INFO(current);
     struct list_head      *extraq[] = {
         EXTRAQ(cpu, EXTRA_PEN_Q), EXTRAQ(cpu, EXTRA_UTIL_Q)};
-#endif
     struct sedf_vcpu_info *runinf, *waitinf;
     struct task_slice      ret;
 
@@ -793,14 +771,12 @@
     if ( inf->status & SEDF_ASLEEP )
         inf->block_abs = now;
 
-#if (EXTRA > EXTRA_OFF)
     if ( unlikely(extra_runs(inf)) )
     {
         /*special treatment of domains running in extra time*/
         desched_extra_dom(now, current);
     }
     else 
-#endif
     {
         desched_edf_dom(now, current);
     }
@@ -837,13 +813,8 @@
         waitinf  = list_entry(waitq->next,struct sedf_vcpu_info, list);
         /*we could not find any suitable domain 
           => look for domains that are aware of extratime*/
-#if (EXTRA > EXTRA_OFF)
         ret = sedf_do_extra_schedule(now, PERIOD_BEGIN(waitinf),
                                      extraq, cpu);
-#else
-        ret.task = IDLETASK(cpu);
-        ret.time = PERIOD_BEGIN(waitinf) - now;
-#endif
         CHECK(ret.time > 0);
     }
     else
@@ -891,14 +862,10 @@
     {
         if ( __task_on_queue(d) )
             __del_from_queue(d);
-#if (EXTRA > EXTRA_OFF)
         if ( extraq_on(d, EXTRA_UTIL_Q) ) 
             extraq_del(d, EXTRA_UTIL_Q);
-#endif
-#if (EXTRA == EXTRA_BLOCK_WEIGHT)
         if ( extraq_on(d, EXTRA_PEN_Q) )
             extraq_del(d, EXTRA_PEN_Q);
-#endif
     }
 }
 
@@ -939,7 +906,7 @@
  *     -addition: experiments have shown that this may have a HUGE impact on
  *      performance of other domains, because it can lead to excessive context
  *      switches
- 
+ *
  *    Part2: Long Unblocking
  *    Part 2a
  *     -it is obvious that such accounting of block time, applied when
@@ -974,32 +941,6 @@
  *     -either behaviour can lead to missed deadlines in other domains as
  *      opposed to approaches 1,2a,2b
  */
-#if (UNBLOCK <= UNBLOCK_SHORT_RESUME)
-static void unblock_short_vcons(struct sedf_vcpu_info* inf, s_time_t now)
-{
-    inf->deadl_abs += inf->period;
-    inf->cputime = 0;
-}
-#endif
-
-#if (UNBLOCK == UNBLOCK_SHORT_RESUME)
-static void unblock_short_cons(struct sedf_vcpu_info* inf, s_time_t now)
-{
-    /*treat blocked time as consumed by the domain*/
-    inf->cputime += now - inf->block_abs; 
-    if ( (inf->cputime + EXTRA_QUANTUM) > inf->slice )
-    {
-        /*we don't have a reasonable amount of time in 
-          our slice left :( => start in next period!*/
-        unblock_short_vcons(inf, now);
-    }
-#ifdef SEDF_STATS
-    else
-        inf->short_cont++;
-#endif
-}
-#endif
-
 static void unblock_short_extra_support(
     struct sedf_vcpu_info* inf, s_time_t now)
 {
@@ -1051,33 +992,6 @@
 }
 
 
-#if (UNBLOCK == UNBLOCK_ISOCHRONOUS_EDF)
-static void unblock_long_vcons(struct sedf_vcpu_info* inf, s_time_t now)
-{
-    /* align to next future period */
-    inf->deadl_abs += (DIV_UP(now - inf->deadl_abs, inf->period) +1)
-        * inf->period;
-    inf->cputime = 0;
-}
-#endif
-
-
-#if 0
-static void unblock_long_cons_a (struct sedf_vcpu_info* inf, s_time_t now)
-{
-    /*treat the time the domain was blocked in the
-     CURRENT period as consumed by the domain*/
-    inf->cputime = (now - inf->deadl_abs) % inf->period; 
-    if ( (inf->cputime + EXTRA_QUANTUM) > inf->slice )
-    {
-        /*we don't have a reasonable amount of time in our slice
-          left :( => start in next period!*/
-        unblock_long_vcons(inf, now);
-    }
-}
-#endif
-
-
 static void unblock_long_cons_b(struct sedf_vcpu_info* inf,s_time_t now)
 {
     /*Conservative 2b*/
@@ -1085,110 +999,6 @@
     inf->deadl_abs = now + inf->period;
     inf->cputime = 0;
 }
-
-
-#if (UNBLOCK == UNBLOCK_ATROPOS)
-static void unblock_long_cons_c(struct sedf_vcpu_info* inf,s_time_t now)
-{
-    if ( likely(inf->latency) )
-    {
-        /*scale the slice and period accordingly to the latency hint*/
-        /*reduce period temporarily to the latency hint*/
-        inf->period = inf->latency;
-        /*this results in max. 4s slice/period length*/
-        ASSERT((inf->period < ULONG_MAX)
-               && (inf->slice_orig < ULONG_MAX));
-        /*scale slice accordingly, so that utilisation stays the same*/
-        inf->slice = (inf->period * inf->slice_orig)
-            / inf->period_orig;
-        inf->deadl_abs = now + inf->period;
-        inf->cputime = 0;
-    } 
-    else
-    {
-        /*we don't have a latency hint.. use some other technique*/
-        unblock_long_cons_b(inf, now);
-    }
-}
-#endif
-
-
-#if (UNBLOCK == UNBLOCK_BURST)
-/*a new idea of dealing with short blocks: burst period scaling*/
-static void unblock_short_burst(struct sedf_vcpu_info* inf, s_time_t now)
-{
-    /*treat blocked time as consumed by the domain*/
-    inf->cputime += now - inf->block_abs;
- 
-    if ( (inf->cputime + EXTRA_QUANTUM) <= inf->slice )
-    {
-        /*if we can still use some time in the current slice
-          then use it!*/
-#ifdef SEDF_STATS
-        /*we let the domain run in the current period*/
-        inf->short_cont++;
-#endif
-    }
-    else
-    {
-        /*we don't have a reasonable amount of time in
-          our slice left => switch to burst mode*/
-        if ( likely(inf->unblock_abs) )
-        {
-            /*set the period-length to the current blocking
-              interval, possible enhancements: average over last
-              blocking intervals, user-specified minimum,...*/
-            inf->period = now - inf->unblock_abs;
-            /*check for overflow on multiplication*/
-            ASSERT((inf->period < ULONG_MAX) 
-                   && (inf->slice_orig < ULONG_MAX));
-            /*scale slice accordingly, so that utilisation
-              stays the same*/
-            inf->slice = (inf->period * inf->slice_orig)
-                / inf->period_orig;
-            /*set new (shorter) deadline*/
-            inf->deadl_abs += inf->period;
-        }
-        else
-        {
-            /*in case we haven't unblocked before
-              start in next period!*/
-            inf->cputime=0;
-            inf->deadl_abs += inf->period;
-        }
-    }
-
-    inf->unblock_abs = now;
-}
-
-
-static void unblock_long_burst(struct sedf_vcpu_info* inf, s_time_t now)
-{
-    if ( unlikely(inf->latency && (inf->period > inf->latency)) )
-    {
-        /*scale the slice and period accordingly to the latency hint*/
-        inf->period = inf->latency;
-        /*check for overflows on multiplication*/
-        ASSERT((inf->period < ULONG_MAX)
-               && (inf->slice_orig < ULONG_MAX));
-        /*scale slice accordingly, so that utilisation stays the same*/
-        inf->slice = (inf->period * inf->slice_orig)
-            / inf->period_orig;
-        inf->deadl_abs = now + inf->period;
-        inf->cputime = 0;
-    }
-    else
-    {
-        /*we don't have a latency hint.. or we are currently in 
-          "burst mode": use some other technique
-          NB: this should be in fact the normal way of operation,
-          when we are in sync with the device!*/
-        unblock_long_cons_b(inf, now);
-    }
-
-    inf->unblock_abs = now;
-}
-#endif /* UNBLOCK == UNBLOCK_BURST */
 
 
 #define DOMAIN_EDF   1
@@ -1225,32 +1035,31 @@
     cur_inf   = EDOM_INFO(cur);
     other_inf = EDOM_INFO(other);
  
- /*check whether we need to make an earlier sched-decision*/
-    if (PERIOD_BEGIN(other_inf) < 
-        CPU_INFO(other->processor)->current_slice_expires)
+    /* Check whether we need to make an earlier scheduling decision. */
+    if ( PERIOD_BEGIN(other_inf) < 
+         CPU_INFO(other->processor)->current_slice_expires )
         return 1;
-    /*no timing-based switches need to be taken into account here*/
-    switch (get_run_type(cur)) {
+
+    /* No timing-based switches need to be taken into account here. */
+    switch ( get_run_type(cur) )
+    {
     case DOMAIN_EDF:
-        /* do not interrupt a running EDF domain */ 
+        /* Do not interrupt a running EDF domain. */
         return 0;
     case DOMAIN_EXTRA_PEN:
-        /*check whether we also want 
-          the L0 ex-q with lower score*/
-        if ((other_inf->status & EXTRA_WANT_PEN_Q)
-            &&  (other_inf->score[EXTRA_PEN_Q] < 
-                 cur_inf->score[EXTRA_PEN_Q]))
-            return 1;
-        else return 0;
+        /* Check whether we also want the L0 ex-q with lower score. */
+        return ((other_inf->status & EXTRA_WANT_PEN_Q) &&
+                (other_inf->score[EXTRA_PEN_Q] < 
+                 cur_inf->score[EXTRA_PEN_Q]));
     case DOMAIN_EXTRA_UTIL:
-        /*check whether we want the L0 extraq, don't
-          switch if both domains want L1 extraq */
-        if (other_inf->status & EXTRA_WANT_PEN_Q)
-            return 1;
-        else return 0;
+        /*
+         * Check whether we want the L0 extraq.
+         * Don't switch if both domains want L1 extraq.
+         */
+        return !!(other_inf->status & EXTRA_WANT_PEN_Q);
     case DOMAIN_IDLE:
         return 1;
     }
+
     return 1;
 }
 
@@ -1295,7 +1104,6 @@
     {
         PRINT(4,"extratime unblock\n");
         /* unblocking in extra-time! */
-#if (EXTRA == EXTRA_BLOCK_WEIGHT)
         if ( inf->status & EXTRA_WANT_PEN_Q )
         {
             /*we have a domain that wants compensation
               for block penalty and did just block in its
               compensation time. Give it another chance!*/
             extraq_add_sort_update(d, EXTRA_PEN_Q, 0);
         }
-#endif
         extraq_check_add_unblocked(d, 0);
     }  
     else
@@ -1316,15 +1123,7 @@
 #ifdef SEDF_STATS
             inf->short_block_tot++;
 #endif
-#if (UNBLOCK <= UNBLOCK_ATROPOS)
-            unblock_short_vcons(inf, now);
-#elif (UNBLOCK == UNBLOCK_SHORT_RESUME)
-            unblock_short_cons(inf, now);
-#elif (UNBLOCK == UNBLOCK_BURST)
-            unblock_short_burst(inf, now);
-#elif (UNBLOCK == UNBLOCK_EXTRA_SUPPORT)
             unblock_short_extra_support(inf, now);
-#endif
 
             extraq_check_add_unblocked(d, 1);
         }
@@ -1335,18 +1134,7 @@
 #ifdef SEDF_STATS
             inf->long_block_tot++;
 #endif
-#if (UNBLOCK == UNBLOCK_ISOCHRONOUS_EDF)
-            unblock_long_vcons(inf, now);
-#elif (UNBLOCK == UNBLOCK_EDF \
-       || UNBLOCK == UNBLOCK_EXTRA_SUPPORT)
             unblock_long_cons_b(inf, now);
-#elif (UNBLOCK == UNBLOCK_ATROPOS)
-            unblock_long_cons_c(inf, now);
-#elif (UNBLOCK == UNBLOCK_SHORT_RESUME)
-            unblock_long_cons_b(inf, now);
-#elif (UNBLOCK == UNBLOCK_BURST)
-            unblock_long_burst(inf, now);
-#endif
 
             extraq_check_add_unblocked(d, 1);
         }
@@ -1528,7 +1316,7 @@
         sumt[cpu] = 0;
     }
 
-    /* sum up all weights */
+    /* Sum across all weights. */
     for_each_domain( d )
     {
         for_each_vcpu( d, p )
@@ -1553,7 +1341,7 @@
         }
     }
 
-    /* adjust all slices (and periods) to the new weight */
+    /* Adjust all slices (and periods) to the new weight. */
     for_each_domain( d )
     {
         for_each_vcpu ( d, p )
@@ -1580,35 +1368,42 @@
 {
     struct vcpu *v;
 
-    PRINT(2,"sedf_adjdom was called, domain-id %i new period %"PRIu64" "\
+    PRINT(2,"sedf_adjdom was called, domain-id %i new period %"PRIu64" "
           "new slice %"PRIu64"\nlatency %"PRIu64" extra:%s\n",
           p->domain_id, cmd->u.sedf.period, cmd->u.sedf.slice,
           cmd->u.sedf.latency, (cmd->u.sedf.extratime)?"yes":"no");
 
     if ( cmd->direction == SCHED_INFO_PUT )
     {
-        /*check for sane parameters*/
-        if (!cmd->u.sedf.period && !cmd->u.sedf.weight)
+        /* Check for sane parameters. */
+        if ( !cmd->u.sedf.period && !cmd->u.sedf.weight )
             return -EINVAL;
-        if (cmd->u.sedf.weight) {
-            if ((cmd->u.sedf.extratime & EXTRA_AWARE) &&
-                (! cmd->u.sedf.period)) {
-                /*weight driven domains with xtime ONLY!*/
-                for_each_vcpu(p, v) {
+        if ( cmd->u.sedf.weight )
+        {
+            if ( (cmd->u.sedf.extratime & EXTRA_AWARE) &&
+                 (!cmd->u.sedf.period) )
+            {
+                /* Weight-driven domains with extratime only. */
+                for_each_vcpu ( p, v )
+                {
                     EDOM_INFO(v)->extraweight = cmd->u.sedf.weight;
                     EDOM_INFO(v)->weight = 0;
                     EDOM_INFO(v)->slice = 0;
                     EDOM_INFO(v)->period = WEIGHT_PERIOD;
                 }
-            } else {
-                /*weight driven domains with real-time execution*/
-                for_each_vcpu(p, v)
+            }
+            else
+            {
+                /* Weight-driven domains with real-time execution. */
+                for_each_vcpu ( p, v )
                     EDOM_INFO(v)->weight = cmd->u.sedf.weight;
             }
         }
-        else {
-            /*time driven domains*/
-            for_each_vcpu(p, v) {
+        else
+        {
+            /* Time-driven domains. */
+            for_each_vcpu ( p, v )
+            {
                 /*
                  * Sanity checking: note that disabling extra weight requires
                  * that we set a non-zero slice.
@@ -1626,10 +1421,12 @@
                     EDOM_INFO(v)->slice   = cmd->u.sedf.slice;
             }
         }
-        if (sedf_adjust_weights(cmd))
+
+        if ( sedf_adjust_weights(cmd) )
             return -EINVAL;
-   
-        for_each_vcpu(p, v) {
+
+        for_each_vcpu ( p, v )
+        {
             EDOM_INFO(v)->status  = 
                 (EDOM_INFO(v)->status &
                  ~EXTRA_AWARE) | (cmd->u.sedf.extratime & EXTRA_AWARE);
@@ -1641,11 +1438,11 @@
     {
         cmd->u.sedf.period    = EDOM_INFO(p->vcpu[0])->period;
         cmd->u.sedf.slice     = EDOM_INFO(p->vcpu[0])->slice;
-        cmd->u.sedf.extratime = EDOM_INFO(p->vcpu[0])->status
-            & EXTRA_AWARE;
+        cmd->u.sedf.extratime = EDOM_INFO(p->vcpu[0])->status & EXTRA_AWARE;
         cmd->u.sedf.latency   = EDOM_INFO(p->vcpu[0])->latency;
         cmd->u.sedf.weight    = EDOM_INFO(p->vcpu[0])->weight;
     }
+
     PRINT(2,"sedf_adjdom_finished\n");
     return 0;
 }
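
The sched_sedf.c hunks above repeatedly advance a missed absolute deadline by
whole periods, e.g. deadl_abs += DIV_UP(now - deadl_abs, period) * period, and
then ASSERT that the result lies in the future. The standalone sketch below
only illustrates that arithmetic: realign_deadline() and the sample values are
hypothetical, and DIV_UP is assumed to be the ceiling division the assertion
relies on.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed semantics of the scheduler's macro: ceiling of x/y for positive x, y. */
#define DIV_UP(x, y) (((x) + (y) - 1) / (y))

/* Push a missed deadline forward by whole periods until it is >= now. */
static uint64_t realign_deadline(uint64_t deadl_abs, uint64_t period, uint64_t now)
{
    if ( deadl_abs < now )
        deadl_abs += DIV_UP(now - deadl_abs, period) * period;
    assert(deadl_abs >= now);
    return deadl_abs;
}

int main(void)
{
    /* A deadline of 100 examined at now=175 with period 30 moves to 100 + 3*30 = 190. */
    printf("%llu\n", (unsigned long long)realign_deadline(100, 30, 175));
    return 0;
}

The result is the first deadline on the original period grid that is no longer
in the past, which is exactly the invariant the ASSERT(curinf->deadl_abs >= now)
in the hunk above checks.
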
diff -r b7802a60b09f -r c668b024b2c7 xen/common/trace.c
--- a/xen/common/trace.c        Mon Apr 10 15:28:52 2006
+++ b/xen/common/trace.c        Mon Apr 10 15:36:03 2006
@@ -27,6 +27,8 @@
 #include <xen/smp.h>
 #include <xen/trace.h>
 #include <xen/errno.h>
+#include <xen/event.h>
+#include <xen/softirq.h>
 #include <xen/init.h>
 #include <asm/atomic.h>
 #include <public/dom0_ops.h>
@@ -40,6 +42,11 @@
 static struct t_rec *t_recs[NR_CPUS];
 static int nr_recs;
 
+/*
+ * High water mark for trace buffers: send a virtual interrupt
+ * when the buffer level reaches this point.
+ */
+static int t_buf_highwater;
+
+
 /* a flag recording whether initialization has been done */
 /* or more properly, if the tbuf subsystem is enabled right now */
 int tb_init_done;
@@ -49,6 +56,12 @@
 
 /* which tracing events are enabled */
 static u32 tb_event_mask = TRC_ALL;
+
+static void trace_notify_guest(void)
+{
+    send_guest_global_virq(dom0, VIRQ_TBUF);
+}
+
 
 /**
  * alloc_trace_bufs - performs initialization of the per-cpu trace buffers.
@@ -92,6 +105,9 @@
         buf->cons = buf->prod = 0;
         t_recs[i] = (struct t_rec *)(buf + 1);
     }
+
+    t_buf_highwater = nr_recs >> 1; /* 50% high water */
+    open_softirq(TRACE_SOFTIRQ, trace_notify_guest);
 
     return 0;
 }
@@ -272,6 +288,13 @@
     buf->prod++;
 
     local_irq_restore(flags);
+
+    /*
+     * Notify trace buffer consumer that we've reached the high water mark.
+     */
+    if ( (buf->prod - buf->cons) == t_buf_highwater )
+        raise_softirq(TRACE_SOFTIRQ);
 }
 
 /*
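
The trace.c hunks above arm a 50% high-water mark (t_buf_highwater = nr_recs >> 1)
and raise TRACE_SOFTIRQ exactly when the fill level reaches it. The user-space
model below is only a sketch of that producer-side check; struct ring,
produce_record() and notify_consumer() are illustrative stand-ins, not names
taken from the patch.

#include <stdint.h>
#include <stdio.h>

struct ring {
    uint32_t prod, cons;   /* free-running counters, as in the trace buffer */
    uint32_t nr_recs;      /* number of record slots */
};

/* Stand-in for raise_softirq(TRACE_SOFTIRQ), which ends in send_guest_global_virq(dom0, VIRQ_TBUF). */
static void notify_consumer(void)
{
    printf("notify consumer: trace buffer half full\n");
}

static void produce_record(struct ring *r)
{
    uint32_t highwater = r->nr_recs >> 1;   /* 50% high water */

    r->prod++;
    if ( (r->prod - r->cons) == highwater )
        notify_consumer();
}

int main(void)
{
    struct ring r = { .prod = 0, .cons = 0, .nr_recs = 8 };
    int i;

    for ( i = 0; i < 8; i++ )
        produce_record(&r);    /* notifies once, when prod - cons reaches 4 */
    return 0;
}

Because the hunk compares with "==" rather than ">=", the notification fires once
per crossing of the mark instead of on every record written while the buffer
stays above it.
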
diff -r b7802a60b09f -r c668b024b2c7 xen/include/public/xen.h
--- a/xen/include/public/xen.h  Mon Apr 10 15:28:52 2006
+++ b/xen/include/public/xen.h  Mon Apr 10 15:36:03 2006
@@ -77,6 +77,7 @@
 #define VIRQ_DEBUG      1  /* V. Request guest to dump debug info.           */
 #define VIRQ_CONSOLE    2  /* G. (DOM0) Bytes received on emergency console. */
 #define VIRQ_DOM_EXC    3  /* G. (DOM0) Exceptional event for some domain.   */
+#define VIRQ_TBUF       4  /* G. (DOM0) Trace buffer has records available.  */
 #define VIRQ_DEBUGGER   6  /* G. (DOM0) A domain has paused for debugging.   */
 #define VIRQ_XENOPROF   7  /* V. XenOprofile interrupt: new sample available */
 #define NR_VIRQS        8
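
The new VIRQ_TBUF above is what dom0 ultimately receives: the record-writing path
in trace.c only raises TRACE_SOFTIRQ, and trace_notify_guest() sends the virtual
interrupt later, when pending softirqs are processed, presumably to keep the
event-channel work out of the tracing hot path. The plain-C sketch below models
that deferral; the pending flag, run_softirqs() and the *_model names are
hypothetical stand-ins for the hypervisor's softirq machinery.

#include <stdbool.h>
#include <stdio.h>

static bool trace_softirq_pending;

/* Models raise_softirq(TRACE_SOFTIRQ): a cheap flag set in the hot path. */
static void raise_trace_softirq(void)
{
    trace_softirq_pending = true;
}

/* Models trace_notify_guest(), i.e. send_guest_global_virq(dom0, VIRQ_TBUF). */
static void trace_notify_guest_model(void)
{
    printf("VIRQ_TBUF delivered to dom0\n");
}

/* Models the softirq dispatcher invoking the handler registered via open_softirq(). */
static void run_softirqs(void)
{
    if ( trace_softirq_pending )
    {
        trace_softirq_pending = false;
        trace_notify_guest_model();
    }
}

int main(void)
{
    raise_trace_softirq();   /* producer side, while writing a trace record */
    run_softirqs();          /* later: the actual notification to dom0 */
    return 0;
}
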
diff -r b7802a60b09f -r c668b024b2c7 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Mon Apr 10 15:28:52 2006
+++ b/xen/include/xen/sched.h   Mon Apr 10 15:36:03 2006
@@ -134,7 +134,7 @@
      */
 #define NR_PIRQS 256 /* Put this somewhere sane! */
     u16              pirq_to_evtchn[NR_PIRQS];
-    u32              pirq_mask[NR_PIRQS/32];
+    DECLARE_BITMAP(pirq_mask, NR_PIRQS);
 
     /* I/O capabilities (access to IRQs and memory-mapped I/O). */
     struct rangeset *iomem_caps;
diff -r b7802a60b09f -r c668b024b2c7 xen/include/xen/softirq.h
--- a/xen/include/xen/softirq.h Mon Apr 10 15:28:52 2006
+++ b/xen/include/xen/softirq.h Mon Apr 10 15:36:03 2006
@@ -9,7 +9,8 @@
 #define NMI_SOFTIRQ                       4
 #define PAGE_SCRUB_SOFTIRQ                5
 #define DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ  6
-#define NR_SOFTIRQS                       7
+#define TRACE_SOFTIRQ                     7
+#define NR_SOFTIRQS                       8
 
 #ifndef __ASSEMBLY__
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog