[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] Merge with xen-ia64-unstable.hg



# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1176476868 -3600
# Node ID 039daabebad5d3c69fb9497693e8e3fd4fee00c9
# Parent  c42ae7839750d685e0330f76351af8b02deabadc
# Parent  0ab8f81019a5df4250893f1aae2d64969b4d1c18
Merge with xen-ia64-unstable.hg
---
 .hgignore                                            |    2 
 README                                               |    4 
 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c |   26 +-
 tools/libxc/xc_domain_restore.c                      |   41 +--
 tools/libxc/xc_domain_save.c                         |   64 +----
 tools/libxc/xg_private.c                             |   12 -
 tools/pygrub/src/pygrub                              |   18 -
 tools/python/xen/xend/XendAPI.py                     |   17 +
 tools/python/xen/xend/XendNode.py                    |    6 
 tools/python/xen/xm/migrate.py                       |   17 +
 tools/python/xen/xm/xenapi_create.py                 |   22 +
 xen/arch/powerpc/0opt.c                              |    6 
 xen/arch/powerpc/domain_build.c                      |    2 
 xen/arch/powerpc/ofd_fixup.c                         |    5 
 xen/arch/x86/domain.c                                |    4 
 xen/arch/x86/hvm/hvm.c                               |  222 +++++++++++++++----
 xen/arch/x86/hvm/intercept.c                         |   46 +--
 xen/arch/x86/hvm/io.c                                |    2 
 xen/arch/x86/hvm/platform.c                          |   46 +++
 xen/arch/x86/hvm/svm/svm.c                           |   33 ++
 xen/arch/x86/hvm/vmx/vmx.c                           |   21 +
 xen/arch/x86/mm.c                                    |   22 +
 xen/common/domain.c                                  |    3 
 xen/include/asm-powerpc/system.h                     |    6 
 xen/include/asm-x86/hvm/domain.h                     |   12 -
 xen/include/asm-x86/hvm/hvm.h                        |    1 
 xen/include/asm-x86/hvm/support.h                    |   15 -
 xen/include/public/hvm/save.h                        |   35 ++
 xen/include/xen/domain_page.h                        |    4 
 29 files changed, 473 insertions(+), 241 deletions(-)

diff -r c42ae7839750 -r 039daabebad5 .hgignore
--- a/.hgignore Fri Apr 13 08:33:21 2007 -0600
+++ b/.hgignore Fri Apr 13 16:07:48 2007 +0100
@@ -22,7 +22,7 @@
 ^\.pc
 ^TAGS$
 ^tags$
-^build.*$
+^build-.*$
 ^dist/.*$
 ^docs/.*\.aux$
 ^docs/.*\.dvi$
diff -r c42ae7839750 -r 039daabebad5 README
--- a/README    Fri Apr 13 08:33:21 2007 -0600
+++ b/README    Fri Apr 13 16:07:48 2007 +0100
@@ -199,3 +199,7 @@ Xend (the Xen daemon) has the following 
     * For optional PAM support, PyPAM:
           URL:    http://www.pangalactic.org/PyPAM/
           Debian: python-pam
+
+    * For optional XenAPI support in XM, PyXML:
+          URL:    http://pyxml.sourceforge.net
+          YUM:    PyXML
diff -r c42ae7839750 -r 039daabebad5 
linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Fri Apr 13 
08:33:21 2007 -0600
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Fri Apr 13 
16:07:48 2007 +0100
@@ -622,14 +622,14 @@ static int network_open(struct net_devic
 
        memset(&np->stats, 0, sizeof(np->stats));
 
-       spin_lock(&np->rx_lock);
+       spin_lock_bh(&np->rx_lock);
        if (netfront_carrier_ok(np)) {
                network_alloc_rx_buffers(dev);
                np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
                if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
                        netif_rx_schedule(dev);
        }
-       spin_unlock(&np->rx_lock);
+       spin_unlock_bh(&np->rx_lock);
 
        network_maybe_wake_tx(dev);
 
@@ -1307,7 +1307,7 @@ static int netif_poll(struct net_device 
        int pages_flipped = 0;
        int err;
 
-       spin_lock(&np->rx_lock);
+       spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */
 
        if (unlikely(!netfront_carrier_ok(np))) {
                spin_unlock(&np->rx_lock);
@@ -1520,7 +1520,7 @@ static void netif_release_rx_bufs(struct
 
        skb_queue_head_init(&free_list);
 
-       spin_lock(&np->rx_lock);
+       spin_lock_bh(&np->rx_lock);
 
        for (id = 0; id < NET_RX_RING_SIZE; id++) {
                if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
@@ -1588,7 +1588,7 @@ static void netif_release_rx_bufs(struct
        while ((skb = __skb_dequeue(&free_list)) != NULL)
                dev_kfree_skb(skb);
 
-       spin_unlock(&np->rx_lock);
+       spin_unlock_bh(&np->rx_lock);
 }
 
 static int network_close(struct net_device *dev)
@@ -1708,8 +1708,8 @@ static int network_connect(struct net_de
        IPRINTK("device %s has %sing receive path.\n",
                dev->name, np->copying_receiver ? "copy" : "flipp");
 
+       spin_lock_bh(&np->rx_lock);
        spin_lock_irq(&np->tx_lock);
-       spin_lock(&np->rx_lock);
 
        /*
         * Recovery procedure:
@@ -1761,8 +1761,8 @@ static int network_connect(struct net_de
        network_tx_buf_gc(dev);
        network_alloc_rx_buffers(dev);
 
-       spin_unlock(&np->rx_lock);
        spin_unlock_irq(&np->tx_lock);
+       spin_unlock_bh(&np->rx_lock);
 
        return 0;
 }
@@ -1818,7 +1818,7 @@ static ssize_t store_rxbuf_min(struct cl
        if (target > RX_MAX_TARGET)
                target = RX_MAX_TARGET;
 
-       spin_lock(&np->rx_lock);
+       spin_lock_bh(&np->rx_lock);
        if (target > np->rx_max_target)
                np->rx_max_target = target;
        np->rx_min_target = target;
@@ -1827,7 +1827,7 @@ static ssize_t store_rxbuf_min(struct cl
 
        network_alloc_rx_buffers(netdev);
 
-       spin_unlock(&np->rx_lock);
+       spin_unlock_bh(&np->rx_lock);
        return len;
 }
 
@@ -1861,7 +1861,7 @@ static ssize_t store_rxbuf_max(struct cl
        if (target > RX_MAX_TARGET)
                target = RX_MAX_TARGET;
 
-       spin_lock(&np->rx_lock);
+       spin_lock_bh(&np->rx_lock);
        if (target < np->rx_min_target)
                np->rx_min_target = target;
        np->rx_max_target = target;
@@ -1870,7 +1870,7 @@ static ssize_t store_rxbuf_max(struct cl
 
        network_alloc_rx_buffers(netdev);
 
-       spin_unlock(&np->rx_lock);
+       spin_unlock_bh(&np->rx_lock);
        return len;
 }
 
@@ -2033,11 +2033,11 @@ static void netif_disconnect_backend(str
 static void netif_disconnect_backend(struct netfront_info *info)
 {
        /* Stop old i/f to prevent errors whilst we rebuild the state. */
+       spin_lock_bh(&info->rx_lock);
        spin_lock_irq(&info->tx_lock);
-       spin_lock(&info->rx_lock);
        netfront_carrier_off(info);
-       spin_unlock(&info->rx_lock);
        spin_unlock_irq(&info->tx_lock);
+       spin_unlock_bh(&info->rx_lock);
 
        if (info->irq)
                unbind_from_irqhandler(info->irq, info->netdev);
diff -r c42ae7839750 -r 039daabebad5 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c   Fri Apr 13 08:33:21 2007 -0600
+++ b/tools/libxc/xc_domain_restore.c   Fri Apr 13 16:07:48 2007 +0100
@@ -688,33 +688,22 @@ int xc_domain_restore(int xc_handle, int
             ERROR("error zeroing magic pages");
             goto out;
         }
-        
-        xc_set_hvm_param(xc_handle, dom, HVM_PARAM_IOREQ_PFN, magic_pfns[0]);
-        xc_set_hvm_param(xc_handle, dom, HVM_PARAM_BUFIOREQ_PFN, 
magic_pfns[1]);
-        xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, magic_pfns[2]);
-        xc_set_hvm_param(xc_handle, dom, HVM_PARAM_PAE_ENABLED, pae);
-        xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
+                
+        if ( (rc = xc_set_hvm_param(xc_handle, dom, 
+                                    HVM_PARAM_IOREQ_PFN, magic_pfns[0]))
+             || (rc = xc_set_hvm_param(xc_handle, dom, 
+                                       HVM_PARAM_BUFIOREQ_PFN, magic_pfns[1]))
+             || (rc = xc_set_hvm_param(xc_handle, dom, 
+                                       HVM_PARAM_STORE_PFN, magic_pfns[2]))
+             || (rc = xc_set_hvm_param(xc_handle, dom, 
+                                       HVM_PARAM_PAE_ENABLED, pae))
+             || (rc = xc_set_hvm_param(xc_handle, dom, 
+                                       HVM_PARAM_STORE_EVTCHN, store_evtchn)) )
+        {
+            ERROR("error setting HVM params: %i", rc);
+            goto out;
+        }
         *store_mfn = magic_pfns[2];
-
-        /* Read vcpu contexts */
-        for ( i = 0; i <= max_vcpu_id; i++ )
-        {
-            if ( !(vcpumap & (1ULL << i)) )
-                continue;
-
-            if ( !read_exact(io_fd, &(ctxt), sizeof(ctxt)) )
-            {
-                ERROR("error read vcpu context.\n");
-                goto out;
-            }
-            
-            if ( (rc = xc_vcpu_setcontext(xc_handle, dom, i, &ctxt)) )
-            {
-                ERROR("Could not set vcpu context, rc=%d", rc);
-                goto out;
-            }
-            rc = 1;
-        }
 
         /* Read HVM context */
         if ( !read_exact(io_fd, &rec_len, sizeof(uint32_t)) )
diff -r c42ae7839750 -r 039daabebad5 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c      Fri Apr 13 08:33:21 2007 -0600
+++ b/tools/libxc/xc_domain_save.c      Fri Apr 13 16:07:48 2007 +0100
@@ -378,8 +378,7 @@ static int analysis_phase(int xc_handle,
 
 
 static int suspend_and_state(int (*suspend)(int), int xc_handle, int io_fd,
-                             int dom, xc_dominfo_t *info,
-                             vcpu_guest_context_t *ctxt)
+                             int dom, xc_dominfo_t *info)
 {
     int i = 0;
 
@@ -396,10 +395,6 @@ static int suspend_and_state(int (*suspe
         ERROR("Could not get domain info");
         return -1;
     }
-
-    if ( xc_vcpu_getcontext(xc_handle, dom, 0, ctxt) )
-        ERROR("Could not get vcpu context");
-
 
     if ( info->dying )
     {
@@ -663,10 +658,11 @@ static xen_pfn_t *map_and_save_p2m_table
 static xen_pfn_t *map_and_save_p2m_table(int xc_handle, 
                                          int io_fd, 
                                          uint32_t dom,
-                                         vcpu_guest_context_t *ctxt,
                                          unsigned long p2m_size,
                                          shared_info_t *live_shinfo)
 {
+    vcpu_guest_context_t ctxt;
+
     /* Double and single indirect references to the live P2M table */
     xen_pfn_t *live_p2m_frame_list_list = NULL;
     xen_pfn_t *live_p2m_frame_list = NULL;
@@ -728,6 +724,12 @@ static xen_pfn_t *map_and_save_p2m_table
                   (uint64_t)p2m_frame_list[i/fpp]);
             goto out;
         }
+    }
+
+    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
+    {
+        ERROR("Could not get vcpu context");
+        goto out;
     }
 
     /*
@@ -736,7 +738,7 @@ static xen_pfn_t *map_and_save_p2m_table
      * slow paths in the restore code.
      */
     if ( (pt_levels == 3) &&
-         (ctxt->vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3)) )
+         (ctxt.vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3)) )
     {
         unsigned long signature = ~0UL;
         uint32_t tot_sz   = sizeof(struct vcpu_guest_context) + 8;
@@ -746,7 +748,7 @@ static xen_pfn_t *map_and_save_p2m_table
              !write_exact(io_fd, &tot_sz,    sizeof(tot_sz)) ||
              !write_exact(io_fd, &chunk_sig, 4) ||
              !write_exact(io_fd, &chunk_sz,  sizeof(chunk_sz)) ||
-             !write_exact(io_fd, ctxt,       sizeof(*ctxt)) )
+             !write_exact(io_fd, &ctxt,      sizeof(ctxt)) )
         {
             ERROR("write: extended info");
             goto out;
@@ -853,11 +855,6 @@ int xc_domain_save(int xc_handle, int io
         return 1;
     }
 
-    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
-    {
-        ERROR("Could not get vcpu context");
-        goto out;
-    }
     shared_info_frame = info.shared_info_frame;
 
     /* Map the shared info frame */
@@ -900,7 +897,7 @@ int xc_domain_save(int xc_handle, int io
     else
     {
         /* This is a non-live suspend. Suspend the domain .*/
-        if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info, &ctxt) )
+        if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info) )
         {
             ERROR("Domain appears not to have suspended");
             goto out;
@@ -999,7 +996,7 @@ int xc_domain_save(int xc_handle, int io
 
         /* Map the P2M table, and write the list of P2M frames */
         live_p2m = map_and_save_p2m_table(xc_handle, io_fd, dom, 
-                                          &ctxt, p2m_size, live_shinfo);
+                                          p2m_size, live_shinfo);
         if ( live_p2m == NULL )
         {
             ERROR("Failed to map/save the p2m frame list");
@@ -1304,17 +1301,13 @@ int xc_domain_save(int xc_handle, int io
                 DPRINTF("Start last iteration\n");
                 last_iter = 1;
 
-                if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info,
-                                       &ctxt) )
+                if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info) )
                 {
                     ERROR("Domain appears not to have suspended");
                     goto out;
                 }
 
-                DPRINTF("SUSPEND shinfo %08lx eip %08lx edx %08lx\n",
-                        info.shared_info_frame,
-                        (unsigned long)ctxt.user_regs.eip,
-                        (unsigned long)ctxt.user_regs.edx);
+                DPRINTF("SUSPEND shinfo %08lx\n", info.shared_info_frame);
             }
 
             if ( xc_shadow_control(xc_handle, dom, 
@@ -1410,27 +1403,6 @@ int xc_domain_save(int xc_handle, int io
             goto out;
         }
 
-        /* Save vcpu contexts */
-
-        for ( i = 0; i <= info.max_vcpu_id; i++ )
-        {
-            if ( !(vcpumap & (1ULL << i)) )
-                continue;
-            
-            if ( xc_vcpu_getcontext(xc_handle, dom, i, &ctxt) )
-            {
-                ERROR("HVM:Could not get vcpu context");
-                goto out;
-            }
-            
-            DPRINTF("write vcpu %d context.\n", i); 
-            if ( !write_exact(io_fd, &(ctxt), sizeof(ctxt)) )
-            {
-                ERROR("write vcpu context failed!\n");
-                goto out;
-            }
-        }
-
         /* Get HVM context from Xen and save it too */
         if ( (rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf, 
                                                   hvm_buf_size)) == -1 )
@@ -1492,6 +1464,12 @@ int xc_domain_save(int xc_handle, int io
                 j = 0;
             }
         }
+    }
+
+    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
+    {
+        ERROR("Could not get vcpu context");
+        goto out;
     }
 
     /* Canonicalise the suspend-record frame number. */
diff -r c42ae7839750 -r 039daabebad5 tools/libxc/xg_private.c
--- a/tools/libxc/xg_private.c  Fri Apr 13 08:33:21 2007 -0600
+++ b/tools/libxc/xg_private.c  Fri Apr 13 16:07:48 2007 +0100
@@ -196,18 +196,6 @@ __attribute__((weak))
 {
     errno = ENOSYS;
     return -1;
-}
-
-__attribute__((weak)) int xc_get_hvm_param(
-    int handle, domid_t dom, int param, unsigned long *value)
-{
-    return -ENOSYS;
-}
-
-__attribute__((weak)) int xc_set_hvm_param(
-    int handle, domid_t dom, int param, unsigned long value)
-{
-    return -ENOSYS;
 }
 
 /*
diff -r c42ae7839750 -r 039daabebad5 tools/pygrub/src/pygrub
--- a/tools/pygrub/src/pygrub   Fri Apr 13 08:33:21 2007 -0600
+++ b/tools/pygrub/src/pygrub   Fri Apr 13 16:07:48 2007 +0100
@@ -61,13 +61,6 @@ def get_active_partition(file):
         if struct.unpack("<c", buf[poff:poff+1]) == ('\x80',):
             return buf[poff:poff+16]
 
-        # type=0xee: GUID partition table
-        # XXX assume the first partition is active
-        if struct.unpack("<c", buf[poff+4:poff+5]) == ('\xee',):
-            os.lseek(fd, 0x400, 0)
-            buf = os.read(fd, 512)
-            return buf[24:40] # XXX buf[32:40]
-
     # if there's not a partition marked as active, fall back to
     # the first partition
     return buf[446:446+16]
@@ -97,8 +90,16 @@ def get_solaris_slice(file, offset):
 
     raise RuntimeError, "No root slice found"      
 
+def get_fs_offset_gpt(file):
+    fd = os.open(file, os.O_RDONLY)
+    # assume the first partition is an EFI system partition.
+    os.lseek(fd, SECTOR_SIZE * 2, 0)
+    buf = os.read(fd, 512)
+    return struct.unpack("<Q", buf[32:40])[0] * SECTOR_SIZE
+
 FDISK_PART_SOLARIS=0xbf
 FDISK_PART_SOLARIS_OLD=0x82
+FDISK_PART_GPT=0xee
 
 def get_fs_offset(file):
     if not is_disk_image(file):
@@ -115,6 +116,9 @@ def get_fs_offset(file):
     if type == FDISK_PART_SOLARIS or type == FDISK_PART_SOLARIS_OLD:
         offset += get_solaris_slice(file, offset)
 
+    if type == FDISK_PART_GPT:
+        offset = get_fs_offset_gpt(file)
+    
     return offset
 
 class GrubLineEditor(curses.textpad.Textbox):
diff -r c42ae7839750 -r 039daabebad5 tools/python/xen/xend/XendAPI.py
--- a/tools/python/xen/xend/XendAPI.py  Fri Apr 13 08:33:21 2007 -0600
+++ b/tools/python/xen/xend/XendAPI.py  Fri Apr 13 16:07:48 2007 +0100
@@ -96,7 +96,10 @@ def datetime(when = None):
     @param when The time in question, given as seconds since the epoch, UTC.
                 May be None, in which case the current time is used.
     """
-    return xmlrpclib.DateTime(time.gmtime(when))
+    if when is None:
+        return xmlrpclib.DateTime(time.gmtime())
+    else:
+        return xmlrpclib.DateTime(time.gmtime(when))
 
 
 # ---------------------------------------------------
@@ -1304,6 +1307,7 @@ class XendAPI(object):
                   ('set_memory_dynamic_max_live', None),
                   ('set_memory_dynamic_min_live', None),
                   ('send_trigger', None),
+                  ('migrate', None),
                   ('destroy', None)]
     
     VM_funcs  = [('create', 'VM'),
@@ -1823,6 +1827,17 @@ class XendAPI(object):
         xendom.domain_send_trigger(xeninfo.getDomid(), trigger, vcpu)
         return xen_api_success_void()
 
+    def VM_migrate(self, _, vm_ref, destination_url, live, other_config):
+        xendom = XendDomain.instance()
+        xeninfo = xendom.get_vm_by_uuid(vm_ref)
+
+        resource = other_config.get("resource", 0)
+        port = other_config.get("port", 0)
+        
+        xendom.domain_migrate(xeninfo.getDomid(), destination_url,
+                              bool(live), resource, port)
+        return xen_api_success_void()
+
     def VM_save(self, _, vm_ref, dest, checkpoint):
         xendom = XendDomain.instance()
         xeninfo = xendom.get_vm_by_uuid(vm_ref)
diff -r c42ae7839750 -r 039daabebad5 tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Fri Apr 13 08:33:21 2007 -0600
+++ b/tools/python/xen/xend/XendNode.py Fri Apr 13 16:07:48 2007 +0100
@@ -150,8 +150,10 @@ class XendNode:
                 
         # Get a mapping from interface to bridge
 
-        if_to_br = dict(reduce(lambda ls,(b,ifs):[(i,b) for i in ifs] + ls,
-                               Brctl.get_state().items(), []))
+        if_to_br = dict([(i,b)
+                         for (b,ifs) in Brctl.get_state().items()
+                         for i in ifs])
+
         # initialise PIFs
         saved_pifs = self.state_store.load_state('pif')
         if saved_pifs:
diff -r c42ae7839750 -r 039daabebad5 tools/python/xen/xm/migrate.py
--- a/tools/python/xen/xm/migrate.py    Fri Apr 13 08:33:21 2007 -0600
+++ b/tools/python/xen/xm/migrate.py    Fri Apr 13 16:07:48 2007 +0100
@@ -23,7 +23,7 @@ import sys
 
 from xen.xm.opts import *
 
-from main import server
+from main import server, serverType, get_single_vm, SERVER_XEN_API
 
 gopts = Opts(use="""[options] DOM HOST
 
@@ -60,5 +60,16 @@ def main(argv):
 
     dom = args[0]
     dst = args[1]
-    server.xend.domain.migrate(dom, dst, opts.vals.live, opts.vals.resource,
-                               opts.vals.port)
+
+    if serverType == SERVER_XEN_API:
+        vm_ref = get_single_vm(dom)
+        other_config = {
+            "port":     opts.vals.port,
+            "resource": opts.vals.resource
+            }
+        server.xenapi.VM.migrate(vm_ref, dst, bool(opts.vals.live),
+                                 other_config)
+    else:
+        server.xend.domain.migrate(dom, dst, opts.vals.live,
+                                   opts.vals.resource,
+                                   opts.vals.port)
diff -r c42ae7839750 -r 039daabebad5 tools/python/xen/xm/xenapi_create.py
--- a/tools/python/xen/xm/xenapi_create.py      Fri Apr 13 08:33:21 2007 -0600
+++ b/tools/python/xen/xm/xenapi_create.py      Fri Apr 13 16:07:48 2007 +0100
@@ -48,7 +48,7 @@ def get_name_description(node):
 
 def get_text_in_child_node(node, child):
     tag_node = node.getElementsByTagName(child)[0]
-    return tag_node.nodeValue
+    return " ".join([child.nodeValue for child in tag_node.childNodes])
 
 def get_child_node_attribute(node, child, attribute):
     tag_node = node.getElementsByTagName(child)[0]
@@ -264,7 +264,23 @@ class xenapi_create:
             "platform":
                 get_child_nodes_as_dict(vm, "platform", "key", "value"),
             "other_config":
-                get_child_nodes_as_dict(vm, "other_config", "key", "value")
+                get_child_nodes_as_dict(vm, "other_config", "key", "value"),
+            "PV_bootloader":
+                "",
+            "PV_kernel":
+                "",
+            "PV_ramdisk":
+                "",
+            "PV_args":
+                "",
+            "PV_bootloader_args":
+                "",
+            "HVM_boot_policy":
+                "",
+            "HVM_boot_params":
+                {},
+            "PCI_bus":
+               ""
             }
 
         if len(vm.getElementsByTagName("pv")) > 0:
@@ -494,7 +510,7 @@ class sxp2xml:
         # Make version tag
 
         version = document.createElement("version")
-        version.appendChild(document.createTextNode("1.0"))
+        version.appendChild(document.createTextNode("0"))
         vm.appendChild(version)
         
         # Make pv or hvm tag
diff -r c42ae7839750 -r 039daabebad5 xen/arch/powerpc/0opt.c
--- a/xen/arch/powerpc/0opt.c   Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/arch/powerpc/0opt.c   Fri Apr 13 16:07:48 2007 +0100
@@ -21,6 +21,12 @@
 #include <xen/config.h>
 #include <xen/lib.h>
 
+extern void __xchg_called_with_bad_pointer(void);
+void __xchg_called_with_bad_pointer(void)
+{
+    BUG();
+}
+
 extern void __cmpxchg_called_with_bad_pointer(void);
 void __cmpxchg_called_with_bad_pointer(void)
 {
diff -r c42ae7839750 -r 039daabebad5 xen/arch/powerpc/domain_build.c
--- a/xen/arch/powerpc/domain_build.c   Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/arch/powerpc/domain_build.c   Fri Apr 13 16:07:48 2007 +0100
@@ -229,7 +229,7 @@ int construct_dom0(struct domain *d,
     /* Load the dom0 kernel. */
     elf.dest = (void *)dst;
     elf_load_binary(&elf);
-    v->arch.ctxt.pc = dst - rma;
+    v->arch.ctxt.pc = dst - rma + (parms.virt_entry - parms.virt_kstart);
     dst = ALIGN_UP(dst + parms.virt_kend, PAGE_SIZE);
 
     /* Load the initrd. */
diff -r c42ae7839750 -r 039daabebad5 xen/arch/powerpc/ofd_fixup.c
--- a/xen/arch/powerpc/ofd_fixup.c      Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/arch/powerpc/ofd_fixup.c      Fri Apr 13 16:07:48 2007 +0100
@@ -264,7 +264,7 @@ static ofdn_t ofd_chosen_props(void *m, 
     ofdn_t n;
     ofdn_t p;
     static const char path[] = "/chosen";
-    char bootargs[256];
+    char bootargs[256] = { 0, };
     int bsz;
     int sz;
     int rm;
@@ -276,7 +276,8 @@ static ofdn_t ofd_chosen_props(void *m, 
                      &path[1], sizeof (path) - 1);
     }
 
-    strlcpy(bootargs, cmdline, sizeof(bootargs));
+    if (cmdline)
+        strlcpy(bootargs, cmdline, sizeof(bootargs));
     bsz = strlen(bootargs) + 1;
     rm = sizeof (bootargs) - bsz;
 
diff -r c42ae7839750 -r 039daabebad5 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/arch/x86/domain.c     Fri Apr 13 16:07:48 2007 +0100
@@ -1540,8 +1540,10 @@ void domain_relinquish_resources(struct 
     relinquish_memory(d, &d->xenpage_list, PGT_l2_page_table);
     relinquish_memory(d, &d->page_list, PGT_l2_page_table);
 
-    /* Free page used by xen oprofile buffer */
+    /* Free page used by xen oprofile buffer. */
     free_xenoprof_pages(d);
+
+    hvm_domain_relinquish_resources(d);
 }
 
 void arch_dump_domain_info(struct domain *d)
diff -r c42ae7839750 -r 039daabebad5 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/arch/x86/hvm/hvm.c    Fri Apr 13 16:07:48 2007 +0100
@@ -101,7 +101,7 @@ void hvm_set_guest_time(struct vcpu *v, 
 
 u64 hvm_get_guest_time(struct vcpu *v)
 {
-    u64    host_tsc;
+    u64 host_tsc;
 
     rdtscll(host_tsc);
     return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
@@ -125,7 +125,7 @@ void hvm_do_resume(struct vcpu *v)
     pt_thaw_time(v);
 
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+    p = &get_ioreq(v)->vp_ioreq;
     while ( p->state != STATE_IOREQ_NONE )
     {
         switch ( p->state )
@@ -146,6 +146,73 @@ void hvm_do_resume(struct vcpu *v)
     }
 }
 
+static void hvm_init_ioreq_page(
+    struct domain *d, struct hvm_ioreq_page *iorp)
+{
+    memset(iorp, 0, sizeof(*iorp));
+    spin_lock_init(&iorp->lock);
+    domain_pause(d);
+}
+
+static void hvm_destroy_ioreq_page(
+    struct domain *d, struct hvm_ioreq_page *iorp)
+{
+    spin_lock(&iorp->lock);
+
+    ASSERT(d->is_dying);
+
+    if ( iorp->va != NULL )
+    {
+        unmap_domain_page_global(iorp->va);
+        put_page_and_type(iorp->page);
+        iorp->va = NULL;
+    }
+
+    spin_unlock(&iorp->lock);
+}
+
+static int hvm_set_ioreq_page(
+    struct domain *d, struct hvm_ioreq_page *iorp, unsigned long gmfn)
+{
+    struct page_info *page;
+    unsigned long mfn;
+    void *va;
+
+    mfn = gmfn_to_mfn(d, gmfn);
+    if ( !mfn_valid(mfn) )
+        return -EINVAL;
+
+    page = mfn_to_page(mfn);
+    if ( !get_page_and_type(page, d, PGT_writable_page) )
+        return -EINVAL;
+
+    va = map_domain_page_global(mfn);
+    if ( va == NULL )
+    {
+        put_page_and_type(page);
+        return -ENOMEM;
+    }
+
+    spin_lock(&iorp->lock);
+
+    if ( (iorp->va != NULL) || d->is_dying )
+    {
+        spin_unlock(&iorp->lock);
+        unmap_domain_page_global(va);
+        put_page_and_type(mfn_to_page(mfn));
+        return -EINVAL;
+    }
+
+    iorp->va = va;
+    iorp->page = page;
+
+    spin_unlock(&iorp->lock);
+
+    domain_unpause(d);
+
+    return 0;
+}
+
 int hvm_domain_initialise(struct domain *d)
 {
     int rc;
@@ -158,10 +225,8 @@ int hvm_domain_initialise(struct domain 
     }
 
     spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
-    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
 
-    /* paging support will be determined inside paging.c */
     rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
     if ( rc != 0 )
         return rc;
@@ -169,7 +234,16 @@ int hvm_domain_initialise(struct domain 
     vpic_init(d);
     vioapic_init(d);
 
+    hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq);
+    hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
+
     return 0;
+}
+
+void hvm_domain_relinquish_resources(struct domain *d)
+{
+    hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
+    hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
 }
 
 void hvm_domain_destroy(struct domain *d)
@@ -178,19 +252,13 @@ void hvm_domain_destroy(struct domain *d
     rtc_deinit(d);
     pmtimer_deinit(d);
     hpet_deinit(d);
-
-    if ( d->arch.hvm_domain.shared_page_va )
-        unmap_domain_page_global(
-            (void *)d->arch.hvm_domain.shared_page_va);
-
-    if ( d->arch.hvm_domain.buffered_io_va )
-        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
 }
 
 static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
+    struct vcpu_guest_context *vc;
 
     for_each_vcpu(d, v)
     {
@@ -199,7 +267,40 @@ static int hvm_save_cpu_ctxt(struct doma
         if ( test_bit(_VPF_down, &v->pause_flags) ) 
             continue;
 
+        /* Architecture-specific vmcs/vmcb bits */
         hvm_funcs.save_cpu_ctxt(v, &ctxt);
+
+        /* Other vcpu register state */
+        vc = &v->arch.guest_context;
+        if ( vc->flags & VGCF_i387_valid )
+            memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
+        else 
+            memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
+        ctxt.rax = vc->user_regs.eax;
+        ctxt.rbx = vc->user_regs.ebx;
+        ctxt.rcx = vc->user_regs.ecx;
+        ctxt.rdx = vc->user_regs.edx;
+        ctxt.rbp = vc->user_regs.ebp;
+        ctxt.rsi = vc->user_regs.esi;
+        ctxt.rdi = vc->user_regs.edi;
+        /* %rsp handled by arch-specific call above */
+#ifdef __x86_64__        
+        ctxt.r8  = vc->user_regs.r8;
+        ctxt.r9  = vc->user_regs.r9;
+        ctxt.r10 = vc->user_regs.r10;
+        ctxt.r11 = vc->user_regs.r11;
+        ctxt.r12 = vc->user_regs.r12;
+        ctxt.r13 = vc->user_regs.r13;
+        ctxt.r14 = vc->user_regs.r14;
+        ctxt.r15 = vc->user_regs.r15;
+#endif
+        ctxt.dr0 = vc->debugreg[0];
+        ctxt.dr1 = vc->debugreg[1];
+        ctxt.dr2 = vc->debugreg[2];
+        ctxt.dr3 = vc->debugreg[3];
+        ctxt.dr6 = vc->debugreg[6];
+        ctxt.dr7 = vc->debugreg[7];
+
         if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
             return 1; 
     }
@@ -208,9 +309,10 @@ static int hvm_save_cpu_ctxt(struct doma
 
 static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
-    int vcpuid;
+    int vcpuid, rc;
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
+    struct vcpu_guest_context *vc;
 
     /* Which vcpu is this? */
     vcpuid = hvm_load_instance(h);
@@ -219,12 +321,51 @@ static int hvm_load_cpu_ctxt(struct doma
         gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
         return -EINVAL;
     }
+    vc = &v->arch.guest_context;
+
+    /* Need to init this vcpu before loading its contents */
+    LOCK_BIGLOCK(d);
+    if ( !v->is_initialised )
+        if ( (rc = boot_vcpu(d, vcpuid, vc)) != 0 )
+            return rc;
+    UNLOCK_BIGLOCK(d);
 
     if ( hvm_load_entry(CPU, h, &ctxt) != 0 ) 
         return -EINVAL;
 
+    /* Architecture-specific vmcs/vmcb bits */
     if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
         return -EINVAL;
+
+    /* Other vcpu register state */
+    memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
+    vc->user_regs.eax = ctxt.rax;
+    vc->user_regs.ebx = ctxt.rbx;
+    vc->user_regs.ecx = ctxt.rcx;
+    vc->user_regs.edx = ctxt.rdx;
+    vc->user_regs.ebp = ctxt.rbp;
+    vc->user_regs.esi = ctxt.rsi;
+    vc->user_regs.edi = ctxt.rdi;
+    vc->user_regs.esp = ctxt.rsp;
+#ifdef __x86_64__
+    vc->user_regs.r8  = ctxt.r8; 
+    vc->user_regs.r9  = ctxt.r9; 
+    vc->user_regs.r10 = ctxt.r10;
+    vc->user_regs.r11 = ctxt.r11;
+    vc->user_regs.r12 = ctxt.r12;
+    vc->user_regs.r13 = ctxt.r13;
+    vc->user_regs.r14 = ctxt.r14;
+    vc->user_regs.r15 = ctxt.r15;
+#endif
+    vc->debugreg[0] = ctxt.dr0;
+    vc->debugreg[1] = ctxt.dr1;
+    vc->debugreg[2] = ctxt.dr2;
+    vc->debugreg[3] = ctxt.dr3;
+    vc->debugreg[6] = ctxt.dr6;
+    vc->debugreg[7] = ctxt.dr7;
+
+    vc->flags = VGCF_i387_valid | VGCF_online;
+    v->fpu_initialised = 1;
 
     /* Auxiliary processors should be woken immediately. */
     if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
@@ -250,10 +391,20 @@ int hvm_vcpu_initialise(struct vcpu *v)
     }
 
     /* Create ioreq event channel. */
-    v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
-    if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
-        get_vio(v->domain, v->vcpu_id)->vp_eport =
-            v->arch.hvm_vcpu.xen_port;
+    rc = alloc_unbound_xen_event_channel(v, 0);
+    if ( rc < 0 )
+    {
+        hvm_funcs.vcpu_destroy(v);
+        vlapic_destroy(v);
+        return rc;
+    }
+
+    /* Register ioreq event channel. */
+    v->arch.hvm_vcpu.xen_port = rc;
+    spin_lock(&v->domain->arch.hvm_domain.ioreq.lock);
+    if ( v->domain->arch.hvm_domain.ioreq.va != NULL )
+        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
+    spin_unlock(&v->domain->arch.hvm_domain.ioreq.lock);
 
     INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
 
@@ -334,7 +485,7 @@ void hvm_send_assist_req(struct vcpu *v)
     if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
         return; /* implicitly bins the i/o operation */
 
-    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+    p = &get_ioreq(v)->vp_ioreq;
     if ( unlikely(p->state != STATE_IOREQ_NONE) )
     {
         /* This indicates a bug in the device model.  Crash the domain. */
@@ -852,10 +1003,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
     case HVMOP_get_param:
     {
         struct xen_hvm_param a;
+        struct hvm_ioreq_page *iorp;
         struct domain *d;
         struct vcpu *v;
-        unsigned long mfn;
-        void *p;
 
         if ( copy_from_guest(&a, arg, 1) )
             return -EFAULT;
@@ -882,30 +1032,18 @@ long do_hvm_op(unsigned long op, XEN_GUE
             switch ( a.index )
             {
             case HVM_PARAM_IOREQ_PFN:
-                if ( d->arch.hvm_domain.shared_page_va )
-                    goto param_fail;
-                mfn = gmfn_to_mfn(d, a.value);
-                if ( mfn == INVALID_MFN )
-                    goto param_fail;
-                p = map_domain_page_global(mfn);
-                if ( p == NULL )
-                    goto param_fail;
-                d->arch.hvm_domain.shared_page_va = (unsigned long)p;
-                /* Initialise evtchn port info if VCPUs already created. */
-                for_each_vcpu ( d, v )
-                    get_vio(d, v->vcpu_id)->vp_eport =
-                    v->arch.hvm_vcpu.xen_port;
+                iorp = &d->arch.hvm_domain.ioreq;
+                rc = hvm_set_ioreq_page(d, iorp, a.value);
+                spin_lock(&iorp->lock);
+                if ( (rc == 0) && (iorp->va != NULL) )
+                    /* Initialise evtchn port info if VCPUs already created. */
+                    for_each_vcpu ( d, v )
+                        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
+                spin_unlock(&iorp->lock);
                 break;
-            case HVM_PARAM_BUFIOREQ_PFN:
-                if ( d->arch.hvm_domain.buffered_io_va )
-                    goto param_fail;
-                mfn = gmfn_to_mfn(d, a.value);
-                if ( mfn == INVALID_MFN )
-                    goto param_fail;
-                p = map_domain_page_global(mfn);
-                if ( p == NULL )
-                    goto param_fail;
-                d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
+            case HVM_PARAM_BUFIOREQ_PFN: 
+                iorp = &d->arch.hvm_domain.buf_ioreq;
+                rc = hvm_set_ioreq_page(d, iorp, a.value);
                 break;
             case HVM_PARAM_CALLBACK_IRQ:
                 hvm_set_callback_via(d, a.value);
diff -r c42ae7839750 -r 039daabebad5 xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c      Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/arch/x86/hvm/intercept.c      Fri Apr 13 16:07:48 2007 +0100
@@ -158,34 +158,26 @@ int hvm_buffered_io_send(ioreq_t *p)
 int hvm_buffered_io_send(ioreq_t *p)
 {
     struct vcpu *v = current;
-    spinlock_t  *buffered_io_lock;
-    buffered_iopage_t *buffered_iopage =
-        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
-    unsigned long tmp_write_pointer = 0;
-
-    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
-    spin_lock(buffered_io_lock);
-
-    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
-         (unsigned int)IOREQ_BUFFER_SLOT_NUM ) {
-        /* the queue is full.
-         * send the iopacket through the normal path.
-         * NOTE: The arithimetic operation could handle the situation for
-         * write_pointer overflow.
-         */
-        spin_unlock(buffered_io_lock);
-        return 0;
-    }
-
-    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;
-
-    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));
-
-    /*make the ioreq_t visible before write_pointer*/
+    struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
+    buffered_iopage_t *pg = iorp->va;
+
+    spin_lock(&iorp->lock);
+
+    if ( (pg->write_pointer - pg->read_pointer) == IOREQ_BUFFER_SLOT_NUM )
+    {
+        /* The queue is full: send the iopacket through the normal path. */
+        spin_unlock(&iorp->lock);
+        return 0;
+    }
+
+    memcpy(&pg->ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
+           p, sizeof(ioreq_t));
+
+    /* Make the ioreq_t visible /before/ write_pointer. */
     wmb();
-    buffered_iopage->write_pointer++;
-
-    spin_unlock(buffered_io_lock);
+    pg->write_pointer++;
+
+    spin_unlock(&iorp->lock);
 
     return 1;
 }
diff -r c42ae7839750 -r 039daabebad5 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/arch/x86/hvm/io.c     Fri Apr 13 16:07:48 2007 +0100
@@ -832,7 +832,7 @@ void hvm_io_assist(void)
 
     io_opp = &v->arch.hvm_vcpu.io_op;
     regs   = &io_opp->io_context;
-    vio    = get_vio(d, v->vcpu_id);
+    vio    = get_ioreq(v);
 
     p = &vio->vp_ioreq;
     if ( p->state != STATE_IORESP_READY )
diff -r c42ae7839750 -r 039daabebad5 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/arch/x86/hvm/platform.c       Fri Apr 13 16:07:48 2007 +0100
@@ -221,7 +221,6 @@ static inline unsigned long get_immediat
 
     inst++; //skip ModR/M byte
     if ( ad_size != WORD && mod != 3 && rm == 4 ) {
-        rm = *inst & 7;
         inst++; //skip SIB byte
     }
 
@@ -255,6 +254,33 @@ static inline unsigned long get_immediat
     }
 
     return val;
+}
+
+/* Some instructions, like "add $imm8, r/m16"/"MOV $imm32, r/m64" require
+ * the src immediate operand to be sign-extended before the op is executed.
+ * Here we always sign-extend the operand to an "unsigned long" variable.
+ *
+ * Note: to simplify the logic here, the sign-extension may be performed
+ * redundantly against some instructions, like "MOV $imm16, r/m16" -- however
+ * this is harmless, since we always remember the operand's size.
+ */
+static inline unsigned long get_immediate_sign_ext(int ad_size,
+                                                   const unsigned char *inst,
+                                                   int op_size)
+{
+    unsigned long result = get_immediate(ad_size, inst, op_size);
+
+    if ( op_size == QUAD )
+        op_size = LONG;
+
+    ASSERT( op_size == BYTE || op_size == WORD || op_size == LONG );
+
+    if ( result & (1UL << ((8*op_size) - 1)) )
+    {
+        unsigned long mask = ~0UL >> (8 * (sizeof(mask) - op_size));
+        result = ~mask | (result & mask);
+    }
+    return result;
 }
 
 static inline int get_index(const unsigned char *inst, unsigned char rex)
@@ -394,7 +420,9 @@ static int mmio_decode(int address_bytes
     case 8:
         if ( *op_size == 0 )
             *op_size = rex & 0x8 ? QUAD : LONG;
-        if ( *ad_size == 0 )
+        if ( *ad_size == WORD )
+            *ad_size = LONG;
+        else if ( *ad_size == 0 )
             *ad_size = QUAD;
         break;
 #endif
@@ -520,10 +548,10 @@ static int mmio_decode(int address_bytes
         /* opcode 0x83 always has a single byte operand */
         if ( opcode[0] == 0x83 )
             mmio_op->immediate =
-                (signed char)get_immediate(*ad_size, opcode + 1, BYTE);
+                get_immediate_sign_ext(*ad_size, opcode + 1, BYTE);
         else
             mmio_op->immediate =
-                get_immediate(*ad_size, opcode + 1, *op_size);
+                get_immediate_sign_ext(*ad_size, opcode + 1, *op_size);
 
         mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
         mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);
@@ -677,7 +705,7 @@ static int mmio_decode(int address_bytes
 
             mmio_op->operand[0] = mk_operand(*op_size, 0, 0, IMMEDIATE);
             mmio_op->immediate =
-                    get_immediate(*ad_size, opcode + 1, *op_size);
+                    get_immediate_sign_ext(*ad_size, opcode + 1, *op_size);
             mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);
 
             return DECODE_success;
@@ -699,7 +727,7 @@ static int mmio_decode(int address_bytes
 
             mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
             mmio_op->immediate =
-                    get_immediate(*ad_size, opcode + 1, *op_size);
+                    get_immediate_sign_ext(*ad_size, opcode + 1, *op_size);
             mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);
 
             return DECODE_success;
@@ -838,7 +866,7 @@ void send_pio_req(unsigned long port, un
                port, count, size, value, dir, value_is_ptr);
     }
 
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_ioreq(v);
     if ( vio == NULL ) {
         printk("bad shared page: %lx\n", (unsigned long) vio);
         domain_crash_synchronous();
@@ -887,7 +915,7 @@ static void send_mmio_req(unsigned char 
                type, gpa, count, size, value, dir, value_is_ptr);
     }
 
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_ioreq(v);
     if (vio == NULL) {
         printk("bad shared page\n");
         domain_crash_synchronous();
@@ -948,7 +976,7 @@ void send_invalidate_req(void)
     vcpu_iodata_t *vio;
     ioreq_t *p;
 
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_ioreq(v);
     if ( vio == NULL )
     {
         printk("bad shared page: %lx\n", (unsigned long) vio);
diff -r c42ae7839750 -r 039daabebad5 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/arch/x86/hvm/svm/svm.c        Fri Apr 13 16:07:48 2007 +0100
@@ -233,7 +233,7 @@ int svm_vmcb_save(struct vcpu *v, struct
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    c->eip = vmcb->rip;
+    c->rip = vmcb->rip;
 
 #ifdef HVM_DEBUG_SUSPEND
     printk("%s: eip=0x%"PRIx64".\n", 
@@ -241,10 +241,11 @@ int svm_vmcb_save(struct vcpu *v, struct
            inst_len, c->eip);
 #endif
 
-    c->esp = vmcb->rsp;
-    c->eflags = vmcb->rflags;
+    c->rsp = vmcb->rsp;
+    c->rflags = vmcb->rflags;
 
     c->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
+    c->cr2 = v->arch.hvm_svm.cpu_cr2;
     c->cr3 = v->arch.hvm_svm.cpu_cr3;
     c->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
 
@@ -315,14 +316,14 @@ int svm_vmcb_restore(struct vcpu *v, str
     unsigned long mfn, old_base_mfn;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    vmcb->rip    = c->eip;
-    vmcb->rsp    = c->esp;
-    vmcb->rflags = c->eflags;
+    vmcb->rip    = c->rip;
+    vmcb->rsp    = c->rsp;
+    vmcb->rflags = c->rflags;
 
     v->arch.hvm_svm.cpu_shadow_cr0 = c->cr0;
-    vmcb->cr0 = c->cr0 | X86_CR0_WP | X86_CR0_ET;
-    if ( !paging_mode_hap(v->domain) ) 
-        vmcb->cr0 |= X86_CR0_PG;
+    vmcb->cr0 = c->cr0 | X86_CR0_WP | X86_CR0_ET | X86_CR0_PG;
+
+    v->arch.hvm_svm.cpu_cr2 = c->cr2;
 
 #ifdef HVM_DEBUG_SUSPEND
     printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
@@ -421,6 +422,19 @@ int svm_vmcb_restore(struct vcpu *v, str
     vmcb->sysenter_esp = c->sysenter_esp;
     vmcb->sysenter_eip = c->sysenter_eip;
 
+    /* update VMCB for nested paging restore */
+    if ( paging_mode_hap(v->domain) ) {
+        vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
+        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+        vmcb->cr3 = c->cr3;
+        vmcb->np_enable = 1;
+        vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
+        vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
+    }
+
+    vmcb->dr6 = c->dr6;
+    vmcb->dr7 = c->dr7;
+
     paging_update_paging_modes(v);
     return 0;
  
@@ -440,6 +454,7 @@ void svm_save_cpu_state(struct vcpu *v, 
     data->msr_cstar        = vmcb->cstar;
     data->msr_syscall_mask = vmcb->sfmask;
     data->msr_efer         = v->arch.hvm_svm.cpu_shadow_efer;
+    data->msr_flags        = -1ULL;
 
     data->tsc = hvm_get_guest_time(v);
 }
diff -r c42ae7839750 -r 039daabebad5 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Apr 13 16:07:48 2007 +0100
@@ -370,11 +370,12 @@ static inline void __restore_debug_regis
 
 int vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
 {    
-    c->eip = __vmread(GUEST_RIP);
-    c->esp = __vmread(GUEST_RSP);
-    c->eflags = __vmread(GUEST_RFLAGS);
+    c->rip = __vmread(GUEST_RIP);
+    c->rsp = __vmread(GUEST_RSP);
+    c->rflags = __vmread(GUEST_RFLAGS);
 
     c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
+    c->cr2 = v->arch.hvm_vmx.cpu_cr2;
     c->cr3 = v->arch.hvm_vmx.cpu_cr3;
     c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
 
@@ -444,12 +445,14 @@ int vmx_vmcs_restore(struct vcpu *v, str
 
     vmx_vmcs_enter(v);
 
-    __vmwrite(GUEST_RIP, c->eip);
-    __vmwrite(GUEST_RSP, c->esp);
-    __vmwrite(GUEST_RFLAGS, c->eflags);
+    __vmwrite(GUEST_RIP, c->rip);
+    __vmwrite(GUEST_RSP, c->rsp);
+    __vmwrite(GUEST_RFLAGS, c->rflags);
 
     v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
     __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+
+    v->arch.hvm_vmx.cpu_cr2 = c->cr2;
 
 #ifdef HVM_DEBUG_SUSPEND
printk("vmx_vmcs_restore: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
@@ -555,6 +558,8 @@ int vmx_vmcs_restore(struct vcpu *v, str
     __vmwrite(GUEST_SYSENTER_ESP, c->sysenter_esp);
     __vmwrite(GUEST_SYSENTER_EIP, c->sysenter_eip);
 
+    __vmwrite(GUEST_DR7, c->dr7);
+
     vmx_vmcs_exit(v);
 
     paging_update_paging_modes(v);
@@ -590,7 +595,7 @@ void vmx_save_cpu_state(struct vcpu *v, 
     data->shadow_gs = guest_state->shadow_gs;
 
     /* save msrs */
-    data->flags = guest_flags;
+    data->msr_flags        = guest_flags;
     data->msr_lstar        = guest_state->msrs[VMX_INDEX_MSR_LSTAR];
     data->msr_star         = guest_state->msrs[VMX_INDEX_MSR_STAR];
     data->msr_cstar        = guest_state->msrs[VMX_INDEX_MSR_CSTAR];
@@ -607,7 +612,7 @@ void vmx_load_cpu_state(struct vcpu *v, 
     struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
 
     /* restore msrs */
-    guest_state->flags = data->flags;
+    guest_state->flags = data->msr_flags;
     guest_state->msrs[VMX_INDEX_MSR_LSTAR]        = data->msr_lstar;
     guest_state->msrs[VMX_INDEX_MSR_STAR]         = data->msr_star;
     guest_state->msrs[VMX_INDEX_MSR_CSTAR]        = data->msr_cstar;
diff -r c42ae7839750 -r 039daabebad5 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/arch/x86/mm.c Fri Apr 13 16:07:48 2007 +0100
@@ -2041,7 +2041,7 @@ int do_mmuext_op(
                 MEM_LOG("Error while pinning mfn %lx", mfn);
                 break;
             }
-            
+
             if ( unlikely(test_and_set_bit(_PGT_pinned,
                                            &page->u.inuse.type_info)) )
             {
@@ -2054,14 +2054,18 @@ int do_mmuext_op(
             /* A page is dirtied when its pin status is set. */
             mark_dirty(d, mfn);
            
-            /*
-             * We can race domain destruction (domain_relinquish_resources).
-             * NB. The dying-flag test must happen /after/ setting PGT_pinned.
-             */
-            if ( unlikely(this_cpu(percpu_mm_info).foreign != NULL) &&
-                 this_cpu(percpu_mm_info).foreign->is_dying &&
-                 test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
-                put_page_and_type(page);
+            /* We can race domain destruction (domain_relinquish_resources). */
+            if ( unlikely(this_cpu(percpu_mm_info).foreign != NULL) )
+            {
+                int drop_ref;
+                spin_lock(&FOREIGNDOM->page_alloc_lock);
+                drop_ref = (FOREIGNDOM->is_dying &&
+                            test_and_clear_bit(_PGT_pinned,
+                                               &page->u.inuse.type_info));
+                spin_unlock(&FOREIGNDOM->page_alloc_lock);
+                if ( drop_ref )
+                    put_page_and_type(page);
+            }
 
             break;
 
diff -r c42ae7839750 -r 039daabebad5 xen/common/domain.c
--- a/xen/common/domain.c       Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/common/domain.c       Fri Apr 13 16:07:48 2007 +0100
@@ -313,9 +313,6 @@ void domain_kill(struct domain *d)
         return;
     }
 
-    /* Tear down state /after/ setting the dying flag. */
-    smp_wmb();
-
     gnttab_release_mappings(d);
     domain_relinquish_resources(d);
     put_domain(d);
diff -r c42ae7839750 -r 039daabebad5 xen/include/asm-powerpc/system.h
--- a/xen/include/asm-powerpc/system.h  Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/include/asm-powerpc/system.h  Fri Apr 13 16:07:48 2007 +0100
@@ -28,7 +28,11 @@
 #include <asm/processor.h>
 #include <asm/msr.h>
 
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,x)                                                           \
+({                                                                            \
+       __typeof__(*(ptr)) _x_ = (x);                                          \
+       (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+})
 
 static __inline__ unsigned long
 __xchg_u32(volatile int *m, unsigned long val)
diff -r c42ae7839750 -r 039daabebad5 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h  Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/include/asm-x86/hvm/domain.h  Fri Apr 13 16:07:48 2007 +0100
@@ -28,10 +28,16 @@
 #include <public/hvm/params.h>
 #include <public/hvm/save.h>
 
+struct hvm_ioreq_page {
+    spinlock_t lock;
+    struct page_info *page;
+    void *va;
+};
+
 struct hvm_domain {
-    unsigned long          shared_page_va;
-    unsigned long          buffered_io_va;
-    spinlock_t             buffered_io_lock;
+    struct hvm_ioreq_page  ioreq;
+    struct hvm_ioreq_page  buf_ioreq;
+
     s64                    tsc_frequency;
     struct pl_time         pl_time;
 
diff -r c42ae7839750 -r 039daabebad5 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/include/asm-x86/hvm/hvm.h     Fri Apr 13 16:07:48 2007 +0100
@@ -145,6 +145,7 @@ extern struct hvm_function_table hvm_fun
 extern struct hvm_function_table hvm_funcs;
 
 int hvm_domain_initialise(struct domain *d);
+void hvm_domain_relinquish_resources(struct domain *d);
 void hvm_domain_destroy(struct domain *d);
 
 int hvm_vcpu_initialise(struct vcpu *v);
diff -r c42ae7839750 -r 039daabebad5 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/include/asm-x86/hvm/support.h Fri Apr 13 16:07:48 2007 +0100
@@ -32,14 +32,13 @@
 #define HVM_DEBUG 1
 #endif
 
-static inline shared_iopage_t *get_sp(struct domain *d)
-{
-    return (shared_iopage_t *) d->arch.hvm_domain.shared_page_va;
-}
-
-static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
-{
-    return &get_sp(d)->vcpu_iodata[cpu];
+static inline vcpu_iodata_t *get_ioreq(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
+    ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
+    ASSERT(d->arch.hvm_domain.ioreq.va != NULL);
+    return &p->vcpu_iodata[v->vcpu_id];
 }
 
 /* XXX these are really VMX specific */
diff -r c42ae7839750 -r 039daabebad5 xen/include/public/hvm/save.h
--- a/xen/include/public/hvm/save.h     Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/include/public/hvm/save.h     Fri Apr 13 16:07:48 2007 +0100
@@ -87,12 +87,39 @@ DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct 
  */
 
 struct hvm_hw_cpu {
-    uint64_t eip;
-    uint64_t esp;
-    uint64_t eflags;
+    uint8_t  fpu_regs[512];
+
+    uint64_t rax;
+    uint64_t rbx;
+    uint64_t rcx;
+    uint64_t rdx;
+    uint64_t rbp;
+    uint64_t rsi;
+    uint64_t rdi;
+    uint64_t rsp;
+    uint64_t r8;
+    uint64_t r9;
+    uint64_t r10;
+    uint64_t r11;
+    uint64_t r12;
+    uint64_t r13;
+    uint64_t r14;
+    uint64_t r15;
+
+    uint64_t rip;
+    uint64_t rflags;
+
     uint64_t cr0;
+    uint64_t cr2;
     uint64_t cr3;
     uint64_t cr4;
+
+    uint64_t dr0;
+    uint64_t dr1;
+    uint64_t dr2;
+    uint64_t dr3;
+    uint64_t dr6;
+    uint64_t dr7;    
 
     uint32_t cs_sel;
     uint32_t ds_sel;
@@ -142,9 +169,9 @@ struct hvm_hw_cpu {
 
     /* msr for em64t */
     uint64_t shadow_gs;
-    uint64_t flags;
 
     /* msr content saved/restored. */
+    uint64_t msr_flags;
     uint64_t msr_lstar;
     uint64_t msr_star;
     uint64_t msr_cstar;
diff -r c42ae7839750 -r 039daabebad5 xen/include/xen/domain_page.h
--- a/xen/include/xen/domain_page.h     Fri Apr 13 08:33:21 2007 -0600
+++ b/xen/include/xen/domain_page.h     Fri Apr 13 16:07:48 2007 +0100
@@ -96,10 +96,10 @@ domain_mmap_cache_destroy(struct domain_
 
 #else /* !CONFIG_DOMAIN_PAGE */
 
-#define map_domain_page(mfn)                maddr_to_virt((mfn)<<PAGE_SHIFT)
+#define map_domain_page(mfn)                mfn_to_virt(mfn)
 #define unmap_domain_page(va)               ((void)(va))
 
-#define map_domain_page_global(mfn)         maddr_to_virt((mfn)<<PAGE_SHIFT)
+#define map_domain_page_global(mfn)         mfn_to_virt(mfn)
 #define unmap_domain_page_global(va)        ((void)(va))
 
 struct domain_mmap_cache { 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.