
[Xen-changelog] [xen-unstable] Merge with xenppc-unstable.hg



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1173093255 0
# Node ID e74bfc74471778ad6515cef1674bd529b6814ff0
# Parent  5a2b3a1b1f632475c8abdce25132e9d3ea767f7f
# Parent  939d2b7d4a12acea391c75324d189fa8858ffc9c
Merge with xenppc-unstable.hg
---
 linux-2.6-xen-sparse/mm/Kconfig                           |  157 -----------
 linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S          |    2 
 linux-2.6-xen-sparse/arch/i386/mm/pgtable-xen.c           |   58 ++--
 linux-2.6-xen-sparse/arch/x86_64/kernel/head-xen.S        |    2 
 linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c        |   11 
 linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c           |  192 ++++++--------
 linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c    |   42 +--
 linux-2.6-xen-sparse/drivers/xen/netback/netback.c        |    9 
 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c      |   52 ++-
 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/page.h |   15 -
 linux-2.6-xen-sparse/include/linux/page-flags.h           |    6 
 linux-2.6-xen-sparse/include/xen/cpu_hotplug.h            |    2 
 tools/libxc/xc_dom_core.c                                 |    3 
 tools/libxc/xc_linux_restore.c                            |   24 +
 tools/libxc/xc_linux_save.c                               |   16 +
 tools/python/xen/xend/XendDomainInfo.py                   |    2 
 tools/python/xen/xend/XendNode.py                         |    4 
 tools/xenfb/xenfb.c                                       |    5 
 xen/Rules.mk                                              |    8 
 xen/arch/x86/domain.c                                     |   25 +
 xen/arch/x86/domain_build.c                               |    4 
 xen/arch/x86/domctl.c                                     |    7 
 xen/arch/x86/mm/shadow/common.c                           |   11 
 xen/drivers/acpi/numa.c                                   |    9 
 xen/include/acm/acm_hooks.h                               |    4 
 xen/include/public/arch-x86/xen.h                         |    1 
 xen/include/public/xen.h                                  |   18 -
 27 files changed, 305 insertions(+), 384 deletions(-)

diff -r 5a2b3a1b1f63 -r e74bfc744717 linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S
--- a/linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S  Fri Dec 15 08:16:56 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S  Mon Mar 05 11:14:15 2007 +0000
@@ -11,8 +11,6 @@
 #include <asm/asm-offsets.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/elfnote.h>
-
-#define _PAGE_PRESENT 0x1
 
 /*
  * References to members of the new_cpu_data structure.
diff -r 5a2b3a1b1f63 -r e74bfc744717 linux-2.6-xen-sparse/arch/i386/mm/pgtable-xen.c
--- a/linux-2.6-xen-sparse/arch/i386/mm/pgtable-xen.c   Fri Dec 15 08:16:56 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/i386/mm/pgtable-xen.c   Mon Mar 05 11:14:15 2007 +0000
@@ -573,64 +573,67 @@ void make_pages_writable(void *va, unsig
        }
 }
 
-static inline int pgd_walk_set_prot(struct page *page, pgprot_t flags)
+static inline void pgd_walk_set_prot(struct page *page, pgprot_t flags)
 {
        unsigned long pfn = page_to_pfn(page);
-
-       if (PageHighMem(page))
-               return pgprot_val(flags) & _PAGE_RW
-                      ? test_and_clear_bit(PG_pinned, &page->flags)
-                      : !test_and_set_bit(PG_pinned, &page->flags);
-
-       BUG_ON(HYPERVISOR_update_va_mapping(
-               (unsigned long)__va(pfn << PAGE_SHIFT),
-               pfn_pte(pfn, flags), 0));
-
-       return 0;
-}
-
-static int pgd_walk(pgd_t *pgd_base, pgprot_t flags)
+       int rc;
+
+       if (PageHighMem(page)) {
+               if (pgprot_val(flags) & _PAGE_RW)
+                       clear_bit(PG_pinned, &page->flags);
+               else
+                       set_bit(PG_pinned, &page->flags);
+       } else {
+               rc = HYPERVISOR_update_va_mapping(
+                       (unsigned long)__va(pfn << PAGE_SHIFT),
+                       pfn_pte(pfn, flags), 0);
+               if (rc)
+                       BUG();
+       }
+}
+
+static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
 {
        pgd_t *pgd = pgd_base;
        pud_t *pud;
        pmd_t *pmd;
-       int    g, u, m, flush;
+       int    g, u, m, rc;
 
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;
 
-       for (g = 0, flush = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
+       for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */
-                       flush |= pgd_walk_set_prot(virt_to_page(pud),flags);
+                       pgd_walk_set_prot(virt_to_page(pud),flags);
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (pud_none(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */
-                               flush |= pgd_walk_set_prot(virt_to_page(pmd),flags);
+                               pgd_walk_set_prot(virt_to_page(pmd),flags);
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                if (pmd_none(*pmd))
                                        continue;
-                               flush |= pgd_walk_set_prot(pmd_page(*pmd),flags);
+                               pgd_walk_set_prot(pmd_page(*pmd),flags);
                        }
                }
        }
 
-       BUG_ON(HYPERVISOR_update_va_mapping(
+       rc = HYPERVISOR_update_va_mapping(
                (unsigned long)pgd_base,
                pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
-               UVMF_TLB_FLUSH));
-
-       return flush;
+               UVMF_TLB_FLUSH);
+       if (rc)
+               BUG();
 }
 
 static void __pgd_pin(pgd_t *pgd)
 {
-       if (pgd_walk(pgd, PAGE_KERNEL_RO))
-               kmap_flush_unused();
+       pgd_walk(pgd, PAGE_KERNEL_RO);
+       kmap_flush_unused();
        xen_pgd_pin(__pa(pgd));
        set_bit(PG_pinned, &virt_to_page(pgd)->flags);
 }
@@ -638,8 +641,7 @@ static void __pgd_unpin(pgd_t *pgd)
 static void __pgd_unpin(pgd_t *pgd)
 {
        xen_pgd_unpin(__pa(pgd));
-       if (pgd_walk(pgd, PAGE_KERNEL))
-               kmap_flush_unused();
+       pgd_walk(pgd, PAGE_KERNEL);
        clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
 }
 
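For illustration only (not part of this changeset): a minimal userspace C sketch of the pattern the pgtable-xen.c and pageattr-xen.c hunks apply, hoisting a call with side effects out of BUG_ON() into an explicit return-code check so the call itself never sits inside an assertion macro. update_mapping() below is a hypothetical stand-in for HYPERVISOR_update_va_mapping().

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for HYPERVISOR_update_va_mapping():
 * returns 0 on success, non-zero on failure. */
static int update_mapping(unsigned long va)
{
        printf("updating mapping for va %#lx\n", va);
        return 0;
}

/* Old shape:  BUG_ON(update_mapping(va));
 * New shape:  keep the call unconditional, assert only on the result. */
static void set_prot(unsigned long va)
{
        int rc = update_mapping(va);

        if (rc)
                assert(!"update_mapping failed");
}

int main(void)
{
        set_prot(0xc0000000UL);
        return 0;
}
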
diff -r 5a2b3a1b1f63 -r e74bfc744717 linux-2.6-xen-sparse/arch/x86_64/kernel/head-xen.S
--- a/linux-2.6-xen-sparse/arch/x86_64/kernel/head-xen.S        Fri Dec 15 08:16:56 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/head-xen.S        Mon Mar 05 11:14:15 2007 +0000
@@ -24,8 +24,6 @@
 #include <asm/cache.h>
 
 #include <xen/interface/elfnote.h>
-
-#define _PAGE_PRESENT 0x1
 
        .section .bootstrap.text, "ax", @progbits
        .code64
diff -r 5a2b3a1b1f63 -r e74bfc744717 linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c
--- a/linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c        Fri Dec 15 08:16:56 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c        Mon Mar 05 11:14:15 2007 +0000
@@ -24,10 +24,13 @@ static inline void mm_walk_set_prot(void
 {
        struct page *page = virt_to_page(pt);
        unsigned long pfn = page_to_pfn(page);
-
-       BUG_ON(HYPERVISOR_update_va_mapping(
-                      (unsigned long)__va(pfn << PAGE_SHIFT),
-                      pfn_pte(pfn, flags), 0));
+       int rc;
+
+       rc = HYPERVISOR_update_va_mapping(
+               (unsigned long)__va(pfn << PAGE_SHIFT),
+               pfn_pte(pfn, flags), 0);
+       if (rc)
+               BUG();
 }
 
 static void mm_walk(struct mm_struct *mm, pgprot_t flags)
diff -r 5a2b3a1b1f63 -r e74bfc744717 linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c
--- a/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c   Fri Dec 15 08:16:56 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c   Mon Mar 05 11:14:15 2007 +0000
@@ -113,14 +113,13 @@ void __exit tpmif_exit(void);
 
 
 static inline int
-tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
+tx_buffer_copy(struct tx_buffer *txb, const u8 *src, int len,
                int isuserbuffer)
 {
        int copied = len;
 
-       if (len > txb->size) {
+       if (len > txb->size)
                copied = txb->size;
-       }
        if (isuserbuffer) {
                if (copy_from_user(txb->data, src, copied))
                        return -EFAULT;
@@ -133,18 +132,20 @@ tx_buffer_copy(struct tx_buffer *txb, co
 
 static inline struct tx_buffer *tx_buffer_alloc(void)
 {
-       struct tx_buffer *txb = kzalloc(sizeof (struct tx_buffer),
-                                       GFP_KERNEL);
-
-       if (txb) {
-               txb->len = 0;
-               txb->size = PAGE_SIZE;
-               txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
-               if (txb->data == NULL) {
-                       kfree(txb);
-                       txb = NULL;
-               }
-       }
+       struct tx_buffer *txb;
+
+       txb = kzalloc(sizeof(struct tx_buffer), GFP_KERNEL);
+       if (!txb)
+               return NULL;
+
+       txb->len = 0;
+       txb->size = PAGE_SIZE;
+       txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
+       if (txb->data == NULL) {
+               kfree(txb);
+               txb = NULL;
+       }
+
        return txb;
 }
 
@@ -160,37 +161,41 @@ static inline void tx_buffer_free(struct
 /**************************************************************
  Utility function for the tpm_private structure
 **************************************************************/
-static inline void tpm_private_init(struct tpm_private *tp)
+static void tpm_private_init(struct tpm_private *tp)
 {
        spin_lock_init(&tp->tx_lock);
        init_waitqueue_head(&tp->wait_q);
        atomic_set(&tp->refcnt, 1);
 }
 
-static inline void tpm_private_put(void)
-{
-       if ( atomic_dec_and_test(&my_priv->refcnt)) {
-               tpmif_free_tx_buffers(my_priv);
-               kfree(my_priv);
-               my_priv = NULL;
-       }
+static void tpm_private_put(void)
+{
+       if (!atomic_dec_and_test(&my_priv->refcnt))
+               return;
+
+       tpmif_free_tx_buffers(my_priv);
+       kfree(my_priv);
+       my_priv = NULL;
 }
 
 static struct tpm_private *tpm_private_get(void)
 {
        int err;
-       if (!my_priv) {
-               my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
-               if (my_priv) {
-                       tpm_private_init(my_priv);
-                       err = tpmif_allocate_tx_buffers(my_priv);
-                       if (err < 0) {
-                               tpm_private_put();
-                       }
-               }
-       } else {
+
+       if (my_priv) {
                atomic_inc(&my_priv->refcnt);
-       }
+               return my_priv;
+       }
+
+       my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
+       if (!my_priv)
+               return NULL;
+
+       tpm_private_init(my_priv);
+       err = tpmif_allocate_tx_buffers(my_priv);
+       if (err < 0)
+               tpm_private_put();
+
        return my_priv;
 }
 
@@ -379,10 +384,8 @@ static int tpmfront_probe(struct xenbus_
                return -ENOMEM;
 
        tp->chip = init_vtpm(&dev->dev, &tvd, tp);
-
-       if (IS_ERR(tp->chip)) {
+       if (IS_ERR(tp->chip))
                return PTR_ERR(tp->chip);
-       }
 
        err = xenbus_scanf(XBT_NIL, dev->nodename,
                           "handle", "%i", &handle);
@@ -401,6 +404,7 @@ static int tpmfront_probe(struct xenbus_
                tpm_private_put();
                return err;
        }
+
        return 0;
 }
 
@@ -417,30 +421,34 @@ static int tpmfront_suspend(struct xenbu
 {
        struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
        u32 ctr;
-       /* lock, so no app can send */
+
+       /* Take the lock, preventing any application from sending. */
        mutex_lock(&suspend_lock);
        tp->is_suspended = 1;
 
-       for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 300; ctr++) {
+       for (ctr = 0; atomic_read(&tp->tx_busy); ctr++) {
                if ((ctr % 10) == 0)
                        printk("TPM-FE [INFO]: Waiting for outstanding "
                               "request.\n");
-               /*
-                * Wait for a request to be responded to.
-                */
+               /* Wait for a request to be responded to. */
                interruptible_sleep_on_timeout(&tp->wait_q, 100);
        }
-       xenbus_switch_state(dev, XenbusStateClosing);
-
-       if (atomic_read(&tp->tx_busy)) {
-               /*
-                * A temporary work-around.
-                */
-               printk("TPM-FE [WARNING]: Resetting busy flag.");
-               atomic_set(&tp->tx_busy, 0);
-       }
-
-       return 0;
+
+       return 0;
+}
+
+static int tpmfront_suspend_finish(struct tpm_private *tp)
+{
+       tp->is_suspended = 0;
+       /* Allow applications to send again. */
+       mutex_unlock(&suspend_lock);
+       return 0;
+}
+
+static int tpmfront_suspend_cancel(struct xenbus_device *dev)
+{
+       struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
+       return tpmfront_suspend_finish(tp);
 }
 
 static int tpmfront_resume(struct xenbus_device *dev)
@@ -484,6 +492,7 @@ static struct xenbus_driver tpmfront = {
        .resume = tpmfront_resume,
        .otherend_changed = backend_changed,
        .suspend = tpmfront_suspend,
+       .suspend_cancel = tpmfront_suspend_cancel,
 };
 
 static void __init init_tpm_xenbus(void)
@@ -514,9 +523,8 @@ static void tpmif_free_tx_buffers(struct
 {
        unsigned int i;
 
-       for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
+       for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
                tx_buffer_free(tp->tx_buffers[i]);
-       }
 }
 
 static void tpmif_rx_action(unsigned long priv)
@@ -536,9 +544,8 @@ static void tpmif_rx_action(unsigned lon
        received = tx->size;
 
        buffer = kmalloc(received, GFP_ATOMIC);
-       if (NULL == buffer) {
+       if (!buffer)
                goto exit;
-       }
 
        for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
                struct tx_buffer *txb = tp->tx_buffers[i];
@@ -547,9 +554,8 @@ static void tpmif_rx_action(unsigned lon
 
                tx = &tp->tx->ring[i].req;
                tocopy = tx->size;
-               if (tocopy > PAGE_SIZE) {
+               if (tocopy > PAGE_SIZE)
                        tocopy = PAGE_SIZE;
-               }
 
                memcpy(&buffer[offset], txb->data, tocopy);
 
@@ -607,12 +613,13 @@ static int tpm_xmit(struct tpm_private *
                struct tx_buffer *txb = tp->tx_buffers[i];
                int copied;
 
-               if (NULL == txb) {
+               if (!txb) {
                        DPRINTK("txb (i=%d) is NULL. buffers initilized?\n"
                                "Not transmitting anything!\n", i);
                        spin_unlock_irq(&tp->tx_lock);
                        return -EFAULT;
                }
+
                copied = tx_buffer_copy(txb, &buf[offset], count,
                                        isuserbuffer);
                if (copied < 0) {
@@ -624,25 +631,26 @@ static int tpm_xmit(struct tpm_private *
                offset += copied;
 
                tx = &tp->tx->ring[i].req;
-
                tx->addr = virt_to_machine(txb->data);
                tx->size = txb->len;
 
-               DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
+               DPRINTK("First 4 characters sent by TPM-FE are "
+                       "0x%02x 0x%02x 0x%02x 0x%02x\n",
                        txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
 
-               /* get the granttable reference for this page */
+               /* Get the granttable reference for this page. */
                tx->ref = gnttab_claim_grant_reference(&gref_head);
-
-               if (-ENOSPC == tx->ref) {
+               if (tx->ref == -ENOSPC) {
                        spin_unlock_irq(&tp->tx_lock);
-                       DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
+                       DPRINTK("Grant table claim reference failed in "
+                               "func:%s line:%d file:%s\n",
+                               __FUNCTION__, __LINE__, __FILE__);
                        return -ENOSPC;
                }
-               gnttab_grant_foreign_access_ref( tx->ref,
-                                                tp->backend_id,
-                                                virt_to_mfn(txb->data),
-                                                0 /*RW*/);
+               gnttab_grant_foreign_access_ref(tx->ref,
+                                               tp->backend_id,
+                                               virt_to_mfn(txb->data),
+                                               0 /*RW*/);
                wmb();
        }
 
@@ -660,15 +668,10 @@ static int tpm_xmit(struct tpm_private *
 
 static void tpmif_notify_upperlayer(struct tpm_private *tp)
 {
-       /*
-        * Notify upper layer about the state of the connection
-        * to the BE.
-        */
-       if (tp->is_connected) {
-               vtpm_vd_status(tp->chip, TPM_VD_STATUS_CONNECTED);
-       } else {
-               vtpm_vd_status(tp->chip, TPM_VD_STATUS_DISCONNECTED);
-       }
+       /* Notify upper layer about the state of the connection to the BE. */
+       vtpm_vd_status(tp->chip, (tp->is_connected
+                                 ? TPM_VD_STATUS_CONNECTED
+                                 : TPM_VD_STATUS_DISCONNECTED));
 }
 
 
@@ -679,20 +682,16 @@ static void tpmif_set_connected_state(st
         * should disconnect - assumption is that we will resume
         * The mutex keeps apps from sending.
         */
-       if (is_connected == 0 && tp->is_suspended == 1) {
+       if (is_connected == 0 && tp->is_suspended == 1)
                return;
-       }
 
        /*
         * Unlock the mutex if we are connected again
         * after being suspended - now resuming.
         * This also removes the suspend state.
         */
-       if (is_connected == 1 && tp->is_suspended == 1) {
-               tp->is_suspended = 0;
-               /* unlock, so apps can resume sending */
-               mutex_unlock(&suspend_lock);
-       }
+       if (is_connected == 1 && tp->is_suspended == 1)
+               tpmfront_suspend_finish(tp);
 
        if (is_connected != tp->is_connected) {
                tp->is_connected = is_connected;
@@ -710,33 +709,24 @@ static void tpmif_set_connected_state(st
 
 static int __init tpmif_init(void)
 {
-       long rc = 0;
        struct tpm_private *tp;
 
        if (is_initial_xendomain())
                return -EPERM;
 
        tp = tpm_private_get();
-       if (!tp) {
-               rc = -ENOMEM;
-               goto failexit;
-       }
+       if (!tp)
+               return -ENOMEM;
 
        IPRINTK("Initialising the vTPM driver.\n");
-       if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
-                                            &gref_head ) < 0) {
-               rc = -EFAULT;
-               goto gnttab_alloc_failed;
+       if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE,
+                                         &gref_head) < 0) {
+               tpm_private_put();
+               return -EFAULT;
        }
 
        init_tpm_xenbus();
        return 0;
-
-gnttab_alloc_failed:
-       tpm_private_put();
-failexit:
-
-       return (int)rc;
 }
 
 
diff -r 5a2b3a1b1f63 -r e74bfc744717 linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c    Fri Dec 15 08:16:56 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c    Mon Mar 05 11:14:15 2007 +0000
@@ -59,23 +59,6 @@ EXPORT_SYMBOL(machine_halt);
 EXPORT_SYMBOL(machine_halt);
 EXPORT_SYMBOL(machine_power_off);
 
-/* Ensure we run on the idle task page tables so that we will
-   switch page tables before running user space. This is needed
-   on architectures with separate kernel and user page tables
-   because the user page table pointer is not saved/restored. */
-static void switch_idle_mm(void)
-{
-       struct mm_struct *mm = current->active_mm;
-
-       if (mm == &init_mm)
-               return;
-
-       atomic_inc(&init_mm.mm_count);
-       switch_mm(mm, &init_mm, current);
-       current->active_mm = &init_mm;
-       mmdrop(mm);
-}
-
 static void pre_suspend(void)
 {
        HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
@@ -99,7 +82,9 @@ static void post_suspend(int suspend_can
                xen_start_info->console.domU.mfn =
                        pfn_to_mfn(xen_start_info->console.domU.mfn);
        } else {
+#ifdef CONFIG_SMP
                cpu_initialized_map = cpumask_of_cpu(0);
+#endif
        }
        
        set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
@@ -172,10 +157,25 @@ static int take_machine_down(void *p_fas
 
        post_suspend(suspend_cancelled);
        gnttab_resume();
-       if (!suspend_cancelled)
+       if (!suspend_cancelled) {
                irq_resume();
+#ifdef __x86_64__
+               /*
+                * Older versions of Xen do not save/restore the user %cr3.
+                * We do it here just in case, but there's no need if we are
+                * in fast-suspend mode as that implies a new enough Xen.
+                */
+               if (!fast_suspend) {
+                       struct mmuext_op op;
+                       op.cmd = MMUEXT_NEW_USER_BASEPTR;
+                       op.arg1.mfn = pfn_to_mfn(__pa(__user_pgd(
+                               current->active_mm->pgd)) >> PAGE_SHIFT);
+                       if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
+                               BUG();
+               }
+#endif
+       }
        time_resume();
-       switch_idle_mm();
        local_irq_enable();
 
        if (fast_suspend && !suspend_cancelled) {
@@ -210,6 +210,10 @@ int __xen_suspend(int fast_suspend)
        }
 #endif
 
+       /* If we are definitely UP then 'slow mode' is actually faster. */
+       if (num_possible_cpus() == 1)
+               fast_suspend = 0;
+
        if (fast_suspend) {
                xenbus_suspend();
                err = stop_machine_run(take_machine_down, &fast_suspend, 0);
diff -r 5a2b3a1b1f63 -r e74bfc744717 linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Fri Dec 15 08:16:56 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Mon Mar 05 11:14:15 2007 +0000
@@ -40,6 +40,9 @@
 
 /*#define NETBE_DEBUG_INTERRUPT*/
 
+/* extra field used in struct page */
+#define netif_page_index(pg) (*(long *)&(pg)->mapping)
+
 struct netbk_rx_meta {
        skb_frag_t frag;
        int id;
@@ -352,7 +355,7 @@ static u16 netbk_gop_frag(netif_t *netif
                copy_gop->flags = GNTCOPY_dest_gref;
                if (PageForeign(page)) {
                        struct pending_tx_info *src_pend =
-                               &pending_tx_info[page->index];
+                               &pending_tx_info[netif_page_index(page)];
                        copy_gop->source.domid = src_pend->netif->domid;
                        copy_gop->source.u.ref = src_pend->req.gref;
                        copy_gop->flags |= GNTCOPY_source_gref;
@@ -1327,7 +1330,7 @@ static void netif_page_release(struct pa
        /* Ready for next use. */
        init_page_count(page);
 
-       netif_idx_release(page->index);
+       netif_idx_release(netif_page_index(page));
 }
 
 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
@@ -1457,7 +1460,7 @@ static int __init netback_init(void)
        for (i = 0; i < MAX_PENDING_REQS; i++) {
                page = mmap_pages[i];
                SetPageForeign(page, netif_page_release);
-               page->index = i;
+               netif_page_index(page) = i;
        }
 
        pending_cons = 0;
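
As a side note (not from the patch): a tiny userspace illustration of the lvalue-cast macro netback now uses to stash its array index in the otherwise-unused page->mapping field. struct page and page_index() below are toy stand-ins, not the kernel definitions.

#include <stdio.h>

/* Toy stand-in for struct page: one pointer-sized field we can repurpose. */
struct page {
        void *mapping;
        unsigned long flags;
};

/* Same trick as netback's netif_page_index(): reinterpret the unused
 * pointer field as a long.  The macro expands to an lvalue, so it can be
 * both read and assigned. */
#define page_index(pg) (*(long *)&(pg)->mapping)

int main(void)
{
        struct page pg = { NULL, 0 };

        page_index(&pg) = 42;                      /* store an index   */
        printf("index = %ld\n", page_index(&pg));  /* read it back: 42 */
        return 0;
}
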
diff -r 5a2b3a1b1f63 -r e74bfc744717 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c      Fri Dec 15 08:16:56 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c      Mon Mar 05 11:14:15 2007 +0000
@@ -173,17 +173,22 @@ static ssize_t xenbus_dev_write(struct f
        void *reply;
        char *path, *token;
        struct watch_adapter *watch, *tmp_watch;
-       int err;
-
-       if ((len + u->len) > sizeof(u->u.buffer))
-               return -EINVAL;
-
-       if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0)
-               return -EFAULT;
+       int err, rc = len;
+
+       if ((len + u->len) > sizeof(u->u.buffer)) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) {
+               rc = -EFAULT;
+               goto out;
+       }
 
        u->len += len;
-       if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
-               return len;
+       if ((u->len < sizeof(u->u.msg)) ||
+           (u->len < (sizeof(u->u.msg) + u->u.msg.len)))
+               return rc;
 
        msg_type = u->u.msg.type;
 
@@ -201,14 +206,17 @@ static ssize_t xenbus_dev_write(struct f
        case XS_SET_PERMS:
                if (msg_type == XS_TRANSACTION_START) {
                        trans = kmalloc(sizeof(*trans), GFP_KERNEL);
-                       if (!trans)
-                               return -ENOMEM;
+                       if (!trans) {
+                               rc = -ENOMEM;
+                               goto out;
+                       }
                }
 
                reply = xenbus_dev_request_and_reply(&u->u.msg);
                if (IS_ERR(reply)) {
                        kfree(trans);
-                       return PTR_ERR(reply);
+                       rc = PTR_ERR(reply);
+                       goto out;
                }
 
                if (msg_type == XS_TRANSACTION_START) {
@@ -231,8 +239,10 @@ static ssize_t xenbus_dev_write(struct f
        case XS_UNWATCH:
                path = u->u.buffer + sizeof(u->u.msg);
                token = memchr(path, 0, u->u.msg.len);
-               if (token == NULL)
-                       return -EILSEQ;
+               if (token == NULL) {
+                       rc = -EILSEQ;
+                       goto out;
+               }
                token++;
 
                if (msg_type == XS_WATCH) {
@@ -251,7 +261,8 @@ static ssize_t xenbus_dev_write(struct f
                        err = register_xenbus_watch(&watch->watch);
                        if (err) {
                                free_watch_adapter(watch);
-                               return err;
+                               rc = err;
+                               goto out;
                        }
                        
                        list_add(&watch->list, &u->watches);
@@ -265,7 +276,6 @@ static ssize_t xenbus_dev_write(struct f
                                                  &u->watches, list) {
                                if (!strcmp(watch->token, token) &&
                                    !strcmp(watch->watch.node, path))
-                                       break;
                                {
                                        unregister_xenbus_watch(&watch->watch);
                                        list_del(&watch->list);
@@ -278,11 +288,13 @@ static ssize_t xenbus_dev_write(struct f
                break;
 
        default:
-               return -EINVAL;
-       }
-
+               rc = -EINVAL;
+               break;
+       }
+
+ out:
        u->len = 0;
-       return len;
+       return rc;
 }
 
 static int xenbus_dev_open(struct inode *inode, struct file *filp)
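
For illustration only (not part of this changeset): a self-contained C sketch of the single-exit "goto out" shape the xenbus_dev_write() rework above adopts, so the accumulated buffer length is reset on every exit path. buffer, BUF_SIZE and handle_message() are invented for the example.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define BUF_SIZE 64

static char buffer[BUF_SIZE];
static size_t buf_len;

/* Hypothetical message handler: returns 0 on success, -errno on failure. */
static int handle_message(const char *buf, size_t len)
{
        return len ? 0 : -EINVAL;
}

static ssize_t dev_write(const char *ubuf, size_t len)
{
        ssize_t rc = len;
        int err;

        if (len + buf_len > sizeof(buffer)) {
                rc = -EINVAL;
                goto out;
        }

        memcpy(buffer + buf_len, ubuf, len);
        buf_len += len;

        err = handle_message(buffer, buf_len);
        if (err) {
                rc = err;
                goto out;
        }

 out:
        buf_len = 0;   /* reset exactly once, on success and error alike */
        return rc;
}

int main(void)
{
        printf("wrote %zd bytes\n", dev_write("hello", 5));
        return 0;
}
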
diff -r 5a2b3a1b1f63 -r e74bfc744717 linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/page.h
--- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/page.h Fri Dec 15 08:16:56 2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/page.h Mon Mar 05 11:14:15 2007 +0000
@@ -20,6 +20,14 @@
 #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
 
 #ifdef __KERNEL__
+
+/*
+ * Need to repeat this here in order to not include pgtable.h (which in turn
+ * depends on definitions made here), but to be able to use the symbolic
+ * below. The preprocessor will warn if the two definitions aren't identical.
+ */
+#define _PAGE_PRESENT  0x001
+
 #ifndef __ASSEMBLY__
 
 #include <linux/string.h>
@@ -28,13 +36,6 @@
 #include <asm/bug.h>
 #include <xen/interface/xen.h>
 #include <xen/features.h>
-
-/*
- * Need to repeat this here in order to not include pgtable.h (which in turn
- * depends on definitions made here), but to be able to use the symbolic
- * below. The preprocessor will warn if the two definitions aren't identical.
- */
-#define _PAGE_PRESENT  0x001
 
 #define arch_free_page(_page,_order)           \
 ({     int foreign = PageForeign(_page);       \
diff -r 5a2b3a1b1f63 -r e74bfc744717 linux-2.6-xen-sparse/include/linux/page-flags.h
--- a/linux-2.6-xen-sparse/include/linux/page-flags.h   Fri Dec 15 08:16:56 2006 -0500
+++ b/linux-2.6-xen-sparse/include/linux/page-flags.h   Mon Mar 05 11:14:15 2007 +0000
@@ -252,14 +252,14 @@
 #define PageForeign(page)      test_bit(PG_foreign, &(page)->flags)
 #define SetPageForeign(page, dtor) do {                \
        set_bit(PG_foreign, &(page)->flags);    \
-       (page)->mapping = (void *)dtor;         \
+       (page)->index = (long)(dtor);           \
 } while (0)
 #define ClearPageForeign(page) do {            \
        clear_bit(PG_foreign, &(page)->flags);  \
-       (page)->mapping = NULL;                 \
+       (page)->index = 0;                      \
 } while (0)
 #define PageForeignDestructor(page)            \
-       ( (void (*) (struct page *)) (page)->mapping )(page)
+       ( (void (*) (struct page *)) (page)->index )(page)
 
 struct page;   /* forward declaration */
 
diff -r 5a2b3a1b1f63 -r e74bfc744717 linux-2.6-xen-sparse/include/xen/cpu_hotplug.h
--- a/linux-2.6-xen-sparse/include/xen/cpu_hotplug.h    Fri Dec 15 08:16:56 2006 -0500
+++ b/linux-2.6-xen-sparse/include/xen/cpu_hotplug.h    Mon Mar 05 11:14:15 2007 +0000
@@ -4,7 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/cpumask.h>
 
-#if defined(CONFIG_X86)
+#if defined(CONFIG_X86) && defined(CONFIG_SMP)
 extern cpumask_t cpu_initialized_map;
 #define cpu_set_initialized(cpu) cpu_set(cpu, cpu_initialized_map)
 #else
diff -r 5a2b3a1b1f63 -r e74bfc744717 linux-2.6-xen-sparse/mm/Kconfig
--- a/linux-2.6-xen-sparse/mm/Kconfig   Fri Dec 15 08:16:56 2006 -0500
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,157 +0,0 @@
-config SELECT_MEMORY_MODEL
-       def_bool y
-       depends on EXPERIMENTAL || ARCH_SELECT_MEMORY_MODEL
-
-choice
-       prompt "Memory model"
-       depends on SELECT_MEMORY_MODEL
-       default DISCONTIGMEM_MANUAL if ARCH_DISCONTIGMEM_DEFAULT
-       default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
-       default FLATMEM_MANUAL
-
-config FLATMEM_MANUAL
-       bool "Flat Memory"
-       depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE
-       help
-         This option allows you to change some of the ways that
-         Linux manages its memory internally.  Most users will
-         only have one option here: FLATMEM.  This is normal
-         and a correct option.
-
-         Some users of more advanced features like NUMA and
-         memory hotplug may have different options here.
-         DISCONTIGMEM is an more mature, better tested system,
-         but is incompatible with memory hotplug and may suffer
-         decreased performance over SPARSEMEM.  If unsure between
-         "Sparse Memory" and "Discontiguous Memory", choose
-         "Discontiguous Memory".
-
-         If unsure, choose this option (Flat Memory) over any other.
-
-config DISCONTIGMEM_MANUAL
-       bool "Discontiguous Memory"
-       depends on ARCH_DISCONTIGMEM_ENABLE
-       help
-         This option provides enhanced support for discontiguous
-         memory systems, over FLATMEM.  These systems have holes
-         in their physical address spaces, and this option provides
-         more efficient handling of these holes.  However, the vast
-         majority of hardware has quite flat address spaces, and
-         can have degraded performance from extra overhead that
-         this option imposes.
-
-         Many NUMA configurations will have this as the only option.
-
-         If unsure, choose "Flat Memory" over this option.
-
-config SPARSEMEM_MANUAL
-       bool "Sparse Memory"
-       depends on ARCH_SPARSEMEM_ENABLE
-       help
-         This will be the only option for some systems, including
-         memory hotplug systems.  This is normal.
-
-         For many other systems, this will be an alternative to
-         "Discontiguous Memory".  This option provides some potential
-         performance benefits, along with decreased code complexity,
-         but it is newer, and more experimental.
-
-         If unsure, choose "Discontiguous Memory" or "Flat Memory"
-         over this option.
-
-endchoice
-
-config DISCONTIGMEM
-       def_bool y
-       depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL
-
-config SPARSEMEM
-       def_bool y
-       depends on SPARSEMEM_MANUAL
-
-config FLATMEM
-       def_bool y
-       depends on (!DISCONTIGMEM && !SPARSEMEM) || FLATMEM_MANUAL
-
-config FLAT_NODE_MEM_MAP
-       def_bool y
-       depends on !SPARSEMEM
-
-#
-# Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
-# to represent different areas of memory.  This variable allows
-# those dependencies to exist individually.
-#
-config NEED_MULTIPLE_NODES
-       def_bool y
-       depends on DISCONTIGMEM || NUMA
-
-config HAVE_MEMORY_PRESENT
-       def_bool y
-       depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM
-
-#
-# SPARSEMEM_EXTREME (which is the default) does some bootmem
-# allocations when memory_present() is called.  If this can not
-# be done on your architecture, select this option.  However,
-# statically allocating the mem_section[] array can potentially
-# consume vast quantities of .bss, so be careful.
-#
-# This option will also potentially produce smaller runtime code
-# with gcc 3.4 and later.
-#
-config SPARSEMEM_STATIC
-       def_bool n
-
-#
-# Architectecture platforms which require a two level mem_section in SPARSEMEM
-# must select this option. This is usually for architecture platforms with
-# an extremely sparse physical address space.
-#
-config SPARSEMEM_EXTREME
-       def_bool y
-       depends on SPARSEMEM && !SPARSEMEM_STATIC
-
-# eventually, we can have this option just 'select SPARSEMEM'
-config MEMORY_HOTPLUG
-       bool "Allow for memory hot-add"
-       depends on SPARSEMEM && HOTPLUG && !SOFTWARE_SUSPEND && ARCH_ENABLE_MEMORY_HOTPLUG
-       depends on (IA64 || X86 || PPC64)
-
-comment "Memory hotplug is currently incompatible with Software Suspend"
-       depends on SPARSEMEM && HOTPLUG && SOFTWARE_SUSPEND
-
-# Heavily threaded applications may benefit from splitting the mm-wide
-# page_table_lock, so that faults on different parts of the user address
-# space can be handled with less contention: split it at this NR_CPUS.
-# Default to 4 for wider testing, though 8 might be more appropriate.
-# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
-# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
-# XEN on x86 architecture uses the mapping field on pagetable pages to store a
-# pointer to the destructor. This conflicts with pte_lock_deinit().
-#
-config SPLIT_PTLOCK_CPUS
-       int
-       default "4096" if ARM && !CPU_CACHE_VIPT
-       default "4096" if PARISC && !PA20
-       default "4096" if X86_XEN || X86_64_XEN
-       default "4"
-
-#
-# support for page migration
-#
-config MIGRATION
-       bool "Page migration"
-       def_bool y
-       depends on NUMA
-       help
-         Allows the migration of the physical location of pages of processes
-         while the virtual addresses are not changed. This is useful for
-         example on NUMA systems to put pages nearer to the processors accessing
-         the page.
-
-config RESOURCES_64BIT
-       bool "64 bit Memory and IO resources (EXPERIMENTAL)" if (!64BIT && EXPERIMENTAL)
-       default 64BIT
-       help
-         This option allows memory and IO resources to be 64 bit.
diff -r 5a2b3a1b1f63 -r e74bfc744717 tools/libxc/xc_dom_core.c
--- a/tools/libxc/xc_dom_core.c Fri Dec 15 08:16:56 2006 -0500
+++ b/tools/libxc/xc_dom_core.c Mon Mar 05 11:14:15 2007 +0000
@@ -721,9 +721,6 @@ int xc_dom_build_image(struct xc_dom_ima
     }
     page_size = XC_DOM_PAGE_SIZE(dom);
 
-    /* 4MB align virtual base address */
-    dom->parms.virt_base &= ~(((uint64_t)1<<22)-1);
-
     /* load kernel */
     if ( xc_dom_alloc_segment(dom, &dom->kernel_seg, "kernel",
                               dom->kernel_seg.vstart,
diff -r 5a2b3a1b1f63 -r e74bfc744717 tools/libxc/xc_linux_restore.c
--- a/tools/libxc/xc_linux_restore.c    Fri Dec 15 08:16:56 2006 -0500
+++ b/tools/libxc/xc_linux_restore.c    Mon Mar 05 11:14:15 2007 +0000
@@ -19,7 +19,7 @@ static unsigned long max_mfn;
 /* virtual starting address of the hypervisor */
 static unsigned long hvirt_start;
 
-/* #levels of page tables used by the currrent guest */
+/* #levels of page tables used by the current guest */
 static unsigned int pt_levels;
 
 /* total number of pages used by the current guest */
@@ -857,6 +857,28 @@ int xc_linux_restore(int xc_handle, int 
 
         ctxt.ctrlreg[3] = xen_pfn_to_cr3(p2m[pfn]);
 
+        /* Guest pagetable (x86/64) stored in otherwise-unused CR1. */
+        if ( (pt_levels == 4) && ctxt.ctrlreg[1] )
+        {
+            pfn = xen_cr3_to_pfn(ctxt.ctrlreg[1]);
+
+            if (pfn >= max_pfn) {
+                ERROR("User PT base is bad: pfn=%lu max_pfn=%lu type=%08lx",
+                      pfn, max_pfn, pfn_type[pfn]);
+                goto out;
+            }
+
+            if ( (pfn_type[pfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
+                 ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) ) {
+                ERROR("User PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx",
+                      pfn, max_pfn, pfn_type[pfn],
+                      (unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
+                goto out;
+            }
+
+            ctxt.ctrlreg[1] = xen_pfn_to_cr3(p2m[pfn]);
+        }
+
         domctl.cmd = XEN_DOMCTL_setvcpucontext;
         domctl.domain = (domid_t)dom;
         domctl.u.vcpucontext.vcpu = i;
diff -r 5a2b3a1b1f63 -r e74bfc744717 tools/libxc/xc_linux_save.c
--- a/tools/libxc/xc_linux_save.c       Fri Dec 15 08:16:56 2006 -0500
+++ b/tools/libxc/xc_linux_save.c       Mon Mar 05 11:14:15 2007 +0000
@@ -34,7 +34,7 @@ static unsigned long max_mfn;
 /* virtual starting address of the hypervisor */
 static unsigned long hvirt_start;
 
-/* #levels of page tables used by the currrent guest */
+/* #levels of page tables used by the current guest */
 static unsigned int pt_levels;
 
 /* total number of pages used by the current guest */
@@ -491,7 +491,7 @@ static int canonicalize_pagetable(unsign
     ** reserved hypervisor mappings. This depends on the current
     ** page table type as well as the number of paging levels.
     */
-    xen_start = xen_end = pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
+    xen_start = xen_end = pte_last = PAGE_SIZE / ((pt_levels == 2) ? 4 : 8);
 
     if (pt_levels == 2 && type == XEN_DOMCTL_PFINFO_L2TAB)
         xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT);
@@ -1279,6 +1279,18 @@ int xc_linux_save(int xc_handle, int io_
         ctxt.ctrlreg[3] = 
             xen_pfn_to_cr3(mfn_to_pfn(xen_cr3_to_pfn(ctxt.ctrlreg[3])));
 
+        /* Guest pagetable (x86/64) stored in otherwise-unused CR1. */
+        if ( (pt_levels == 4) && ctxt.ctrlreg[1] )
+        {
+            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(xen_cr3_to_pfn(ctxt.ctrlreg[1])) ) {
+                ERROR("PT base is not in range of pseudophys map");
+                goto out;
+            }
+            /* Least-significant bit means 'valid PFN'. */
+            ctxt.ctrlreg[1] = 1 |
+                xen_pfn_to_cr3(mfn_to_pfn(xen_cr3_to_pfn(ctxt.ctrlreg[1])));
+        }
+
         if (!write_exact(io_fd, &ctxt, sizeof(ctxt))) {
             ERROR("Error when writing to state file (1) (errno %d)", errno);
             goto out;
diff -r 5a2b3a1b1f63 -r e74bfc744717 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Fri Dec 15 08:16:56 2006 -0500
+++ b/tools/python/xen/xend/XendDomainInfo.py   Mon Mar 05 11:14:15 2007 +0000
@@ -1503,7 +1503,7 @@ class XendDomainInfo:
             self.info['start_time'] = time.time()
 
             self._stateSet(DOM_STATE_RUNNING)
-        except RuntimeError, exn:
+        except (RuntimeError, VmError), exn:
             log.exception("XendDomainInfo.initDomain: exception occurred")
             self.image.cleanupBootloading()
             raise VmError(str(exn))
diff -r 5a2b3a1b1f63 -r e74bfc744717 tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Fri Dec 15 08:16:56 2006 -0500
+++ b/tools/python/xen/xend/XendNode.py Mon Mar 05 11:14:15 2007 +0000
@@ -97,6 +97,10 @@ class XendNode:
         for u in self.cpus.keys():
             log.error(self.cpus[u])
             number = self.cpus[u]['number']
+            # We can run off the end of the cpuinfo list if domain0 does not
+            # have #vcpus == #pcpus. In that case we just replicate pcpu0 info.
+            if not cpuinfo.has_key(number):
+                number = 0
             log.error(number)
             log.error(cpuinfo)
             self.cpus[u].update(
diff -r 5a2b3a1b1f63 -r e74bfc744717 tools/xenfb/xenfb.c
--- a/tools/xenfb/xenfb.c       Fri Dec 15 08:16:56 2006 -0500
+++ b/tools/xenfb/xenfb.c       Mon Mar 05 11:14:15 2007 +0000
@@ -245,11 +245,10 @@ static int xenfb_wait_for_state(struct x
        unsigned state, dummy;
        char **vec;
 
+       awaited |= 1 << XenbusStateUnknown;
+
        for (;;) {
                state = xenfb_read_state(xsh, dir);
-               if (state < 0)
-                       return -1;
-
                if ((1 << state) & awaited)
                        return state;
 
diff -r 5a2b3a1b1f63 -r e74bfc744717 xen/Rules.mk
--- a/xen/Rules.mk      Fri Dec 15 08:16:56 2006 -0500
+++ b/xen/Rules.mk      Mon Mar 05 11:14:15 2007 +0000
@@ -41,8 +41,8 @@ include $(BASEDIR)/arch/$(TARGET_ARCH)/R
 include $(BASEDIR)/arch/$(TARGET_ARCH)/Rules.mk
 
 # Do not depend on auto-generated header files.
-HDRS := $(subst $(BASEDIR)/include/asm-$(TARGET_ARCH)/asm-offsets.h,,$(HDRS))
-HDRS := $(subst $(BASEDIR)/include/xen/compile.h,,$(HDRS))
+AHDRS := $(filter-out %/include/xen/compile.h,$(HDRS))
+HDRS  := $(filter-out %/asm-offsets.h,$(AHDRS))
 
 # Note that link order matters!
 ALL_OBJS-y               += $(BASEDIR)/common/built_in.o
@@ -110,12 +110,12 @@ _clean_%/: FORCE
 %.o: %.c $(HDRS) Makefile
        $(CC) $(CFLAGS) -c $< -o $@
 
-%.o: %.S $(HDRS) Makefile
+%.o: %.S $(AHDRS) Makefile
        $(CC) $(AFLAGS) -c $< -o $@
 
 %.i: %.c $(HDRS) Makefile
        $(CPP) $(CFLAGS) $< -o $@
 
 # -std=gnu{89,99} gets confused by # as an end-of-line comment marker
-%.s: %.S $(HDRS) Makefile
+%.s: %.S $(AHDRS) Makefile
        $(CPP) $(AFLAGS) $< -o $@
diff -r 5a2b3a1b1f63 -r e74bfc744717 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Dec 15 08:16:56 2006 -0500
+++ b/xen/arch/x86/domain.c     Mon Mar 05 11:14:15 2007 +0000
@@ -641,6 +641,31 @@ int arch_set_info_guest(
             }
 
             v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
+
+#ifdef __x86_64__
+            if ( c.nat->ctrlreg[1] )
+            {
+                cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[1]));
+
+                if ( !mfn_valid(cr3_pfn) ||
+                     (paging_mode_refcounts(d)
+                      ? !get_page(mfn_to_page(cr3_pfn), d)
+                      : !get_page_and_type(mfn_to_page(cr3_pfn), d,
+                                           PGT_base_page_table)) )
+                {
+                    cr3_pfn = pagetable_get_pfn(v->arch.guest_table);
+                    v->arch.guest_table = pagetable_null();
+                    if ( paging_mode_refcounts(d) )
+                        put_page(mfn_to_page(cr3_pfn));
+                    else
+                        put_page_and_type(mfn_to_page(cr3_pfn));
+                    destroy_gdt(v);
+                    return -EINVAL;
+                }
+
+                v->arch.guest_table_user = pagetable_from_pfn(cr3_pfn);
+            }
+#endif
         }
 #ifdef CONFIG_COMPAT
         else
diff -r 5a2b3a1b1f63 -r e74bfc744717 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Fri Dec 15 08:16:56 2006 -0500
+++ b/xen/arch/x86/domain_build.c       Mon Mar 05 11:14:15 2007 +0000
@@ -374,9 +374,6 @@ int construct_dom0(struct domain *d,
     if ( parms.f_required[0] /* Huh? -- kraxel */ )
             panic("Domain 0 requires an unsupported hypervisor feature.\n");
 
-    /* Align load address to 4MB boundary. */
-    v_start = parms.virt_base & ~((1UL<<22)-1);
-
     /*
      * Why do we need this? The number of page-table frames depends on the 
      * size of the bootstrap address space. But the size of the address space 
@@ -384,6 +381,7 @@ int construct_dom0(struct domain *d,
      * read-only). We have a pair of simultaneous equations in two unknowns, 
      * which we solve by exhaustive search.
      */
+    v_start          = parms.virt_base;
     vkern_start      = parms.virt_kstart;
     vkern_end        = parms.virt_kend;
     vinitrd_start    = round_pgup(vkern_end);
diff -r 5a2b3a1b1f63 -r e74bfc744717 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Fri Dec 15 08:16:56 2006 -0500
+++ b/xen/arch/x86/domctl.c     Mon Mar 05 11:14:15 2007 +0000
@@ -470,8 +470,15 @@ void arch_get_info_guest(struct vcpu *v,
         c(user_regs.eflags |= v->arch.iopl << 12);
 
         if ( !IS_COMPAT(v->domain) )
+        {
             c.nat->ctrlreg[3] = xen_pfn_to_cr3(
                 pagetable_get_pfn(v->arch.guest_table));
+#ifdef __x86_64__
+            if ( !pagetable_is_null(v->arch.guest_table_user) )
+                c.nat->ctrlreg[1] = xen_pfn_to_cr3(
+                    pagetable_get_pfn(v->arch.guest_table_user));
+#endif
+        }
 #ifdef CONFIG_COMPAT
         else
         {
diff -r 5a2b3a1b1f63 -r e74bfc744717 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Fri Dec 15 08:16:56 2006 -0500
+++ b/xen/arch/x86/mm/shadow/common.c   Mon Mar 05 11:14:15 2007 +0000
@@ -2912,7 +2912,16 @@ void sh_mark_dirty(struct domain *d, mfn
      * can be called from __hvm_copy during emulation).
      * If the lock isn't held, take it for the duration of the call. */
     do_locking = !shadow_locked_by_me(d);
-    if ( do_locking ) shadow_lock(d);
+    if ( do_locking ) 
+    { 
+        shadow_lock(d);
+        /* Check the mode again with the lock held */ 
+        if ( unlikely(!shadow_mode_log_dirty(d)) )
+        {
+            shadow_unlock(d);
+            return;
+        }
+    }
 
     ASSERT(d->arch.paging.shadow.dirty_bitmap != NULL);
 
diff -r 5a2b3a1b1f63 -r e74bfc744717 xen/drivers/acpi/numa.c
--- a/xen/drivers/acpi/numa.c   Fri Dec 15 08:16:56 2006 -0500
+++ b/xen/drivers/acpi/numa.c   Mon Mar 05 11:14:15 2007 +0000
@@ -22,10 +22,6 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  */
-#if 0
-#include <linux/module.h>
-#include <linux/kernel.h>
-#endif
 #include <xen/config.h>
 #include <xen/init.h>
 #include <xen/types.h>
@@ -34,7 +30,6 @@
 #include <xen/numa.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acmacros.h>
-#include <asm/page.h> /* __va() */
 
 #define ACPI_NUMA      0x80000000
 #define _COMPONENT     ACPI_NUMA
@@ -106,7 +101,7 @@ static int __init acpi_parse_slit(unsign
        if (!phys_addr || !size)
                return -EINVAL;
 
-       slit = (struct acpi_table_slit *)__va(phys_addr);
+       slit = (struct acpi_table_slit *)__acpi_map_table(phys_addr, size);
 
        /* downcast just for %llu vs %lu for i386/ia64  */
        localities = (u32) slit->localities;
@@ -159,7 +154,7 @@ static int __init acpi_parse_srat(unsign
        if (!phys_addr || !size)
                return -EINVAL;
 
-       srat = (struct acpi_table_srat *)__va(phys_addr);
+       srat = (struct acpi_table_srat *)__acpi_map_table(phys_addr, size);
 
        return 0;
 }
diff -r 5a2b3a1b1f63 -r e74bfc744717 xen/include/acm/acm_hooks.h
--- a/xen/include/acm/acm_hooks.h       Fri Dec 15 08:16:56 2006 -0500
+++ b/xen/include/acm/acm_hooks.h       Mon Mar 05 11:14:15 2007 +0000
@@ -247,12 +247,12 @@ static inline int acm_pre_domctl(struct 
             if (*ssid == NULL) {
                printk("%s: Warning. Destroying domain without ssid pointer.\n", 
                        __func__);
-                domain_rcu_lock(d);
+                rcu_unlock_domain(d);
                 return -EACCES;
             }
             d->ssid = NULL; /* make sure it's not used any more */
              /* no policy-specific hook */
-            domain_rcu_lock(d);
+            rcu_unlock_domain(d);
             ret = 0;
         }
         break;
diff -r 5a2b3a1b1f63 -r e74bfc744717 xen/include/public/arch-x86/xen.h
--- a/xen/include/public/arch-x86/xen.h Fri Dec 15 08:16:56 2006 -0500
+++ b/xen/include/public/arch-x86/xen.h Mon Mar 05 11:14:15 2007 +0000
@@ -132,6 +132,7 @@ struct vcpu_guest_context {
     unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
     unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
     unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
+    /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
     unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
     unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
 #ifdef __i386__
diff -r 5a2b3a1b1f63 -r e74bfc744717 xen/include/public/xen.h
--- a/xen/include/public/xen.h  Fri Dec 15 08:16:56 2006 -0500
+++ b/xen/include/public/xen.h  Mon Mar 05 11:14:15 2007 +0000
@@ -473,26 +473,24 @@ typedef struct shared_info shared_info_t
 #endif
 
 /*
- * Start-of-day memory layout for the initial domain (DOM0):
+ * Start-of-day memory layout:
  *  1. The domain is started within contiguous virtual-memory region.
- *  2. The contiguous region begins and ends on an aligned 4MB boundary.
- *  3. The region start corresponds to the load address of the OS image.
- *     If the load address is not 4MB aligned then the address is rounded down.
- *  4. This the order of bootstrap elements in the initial virtual region:
+ *  2. The contiguous region ends on an aligned 4MB boundary.
+ *  3. This the order of bootstrap elements in the initial virtual region:
  *      a. relocated kernel image
  *      b. initial ram disk              [mod_start, mod_len]
  *      c. list of allocated page frames [mfn_list, nr_pages]
  *      d. start_info_t structure        [register ESI (x86)]
  *      e. bootstrap page tables         [pt_base, CR3 (x86)]
  *      f. bootstrap stack               [register ESP (x86)]
- *  5. Bootstrap elements are packed together, but each is 4kB-aligned.
- *  6. The initial ram disk may be omitted.
- *  7. The list of page frames forms a contiguous 'pseudo-physical' memory
+ *  4. Bootstrap elements are packed together, but each is 4kB-aligned.
+ *  5. The initial ram disk may be omitted.
+ *  6. The list of page frames forms a contiguous 'pseudo-physical' memory
  *     layout for the domain. In particular, the bootstrap virtual-memory
  *     region is a 1:1 mapping to the first section of the pseudo-physical map.
- *  8. All bootstrap elements are mapped read-writable for the guest OS. The
+ *  7. All bootstrap elements are mapped read-writable for the guest OS. The
  *     only exception is the bootstrap page table, which is mapped read-only.
- *  9. There is guaranteed to be at least 512kB padding after the final
+ *  8. There is guaranteed to be at least 512kB padding after the final
  *     bootstrap element. If necessary, the bootstrap virtual region is
  *     extended by an extra 4MB to ensure this.
  */
