[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] Merge with xenppc-unstable-merge.hg.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 50aea0ec406bd03dad9da31255857e4a41f7efcc
# Parent  e01441c9a607d3364dc092f186e0d925a3d4d8d4
# Parent  74db626d2fcfde3e61f1f43934eddd034d64a4a9
Merge with xenppc-unstable-merge.hg.
---
 linux-2.6-xen-sparse/arch/ia64/dig/setup.c           |    2 
 linux-2.6-xen-sparse/arch/ia64/kernel/setup.c        |    2 
 linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c      |   27 
 linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c  |    6 
 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c |   60 -
 linux-2.6-xen-sparse/include/asm-ia64/agp.h          |   12 
 linux-2.6-xen-sparse/include/asm-ia64/dma-mapping.h  |   20 
 linux-2.6-xen-sparse/include/asm-ia64/hypercall.h    |  165 ---
 linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h   |   38 
 linux-2.6-xen-sparse/include/asm-ia64/io.h           |    6 
 linux-2.6-xen-sparse/include/asm-ia64/machvec.h      |    2 
 linux-2.6-xen-sparse/include/asm-ia64/maddr.h        |   88 +
 linux-2.6-xen-sparse/include/asm-ia64/page.h         |   69 -
 linux-2.6-xen-sparse/include/asm-ia64/pgalloc.h      |    4 
 tools/debugger/gdb/gdbbuild                          |    4 
 tools/examples/xmexample.vti                         |   32 
 tools/libxc/ia64/xc_ia64_hvm_build.c                 |   13 
 tools/libxc/ia64/xc_ia64_linux_save.c                |    2 
 tools/libxc/ia64/xc_ia64_stubs.c                     |   16 
 xen/arch/ia64/asm-offsets.c                          |    1 
 xen/arch/ia64/linux-xen/efi.c                        |   22 
 xen/arch/ia64/linux-xen/iosapic.c                    |    2 
 xen/arch/ia64/linux-xen/irq_ia64.c                   |   10 
 xen/arch/ia64/linux-xen/mca.c                        |    1 
 xen/arch/ia64/linux-xen/process-linux-xen.c          |    5 
 xen/arch/ia64/linux-xen/smp.c                        |    4 
 xen/arch/ia64/linux-xen/unwind.c                     |    3 
 xen/arch/ia64/vmx/Makefile                           |    2 
 xen/arch/ia64/vmx/mmio.c                             |   17 
 xen/arch/ia64/vmx/vmmu.c                             |    1 
 xen/arch/ia64/vmx/vmx_hypercall.c                    |   12 
 xen/arch/ia64/vmx/vmx_init.c                         |   47 
 xen/arch/ia64/vmx/vmx_phy_mode.c                     |    3 
 xen/arch/ia64/vmx/vmx_process.c                      |    8 
 xen/arch/ia64/vmx/vmx_support.c                      |  136 +-
 xen/arch/ia64/vmx/vtlb.c                             |  101 -
 xen/arch/ia64/xen/acpi.c                             |    3 
 xen/arch/ia64/xen/dom0_ops.c                         |    5 
 xen/arch/ia64/xen/dom_fw.c                           |  967 ++++++++---------
 xen/arch/ia64/xen/domain.c                           |   49 
 xen/arch/ia64/xen/faults.c                           |    7 
 xen/arch/ia64/xen/flushd.S                           |   11 
 xen/arch/ia64/xen/fw_emul.c                          |  165 ++-
 xen/arch/ia64/xen/hypercall.c                        |  130 +-
 xen/arch/ia64/xen/hyperprivop.S                      |   81 -
 xen/arch/ia64/xen/irq.c                              |    4 
 xen/arch/ia64/xen/ivt.S                              | 1035 ++++++++++---------
 xen/arch/ia64/xen/mm.c                               |    7 
 xen/arch/ia64/xen/pcdp.c                             |    1 
 xen/arch/ia64/xen/privop.c                           |    4 
 xen/arch/ia64/xen/privop_stat.c                      |  113 +-
 xen/arch/ia64/xen/vcpu.c                             |   31 
 xen/arch/ia64/xen/vhpt.c                             |   18 
 xen/arch/ia64/xen/xen.lds.S                          |    8 
 xen/arch/ia64/xen/xenasm.S                           |   22 
 xen/arch/ia64/xen/xensetup.c                         |   12 
 xen/arch/ia64/xen/xentime.c                          |    4 
 xen/arch/x86/physdev.c                               |    5 
 xen/include/asm-ia64/config.h                        |    6 
 xen/include/asm-ia64/dom_fw.h                        |   56 -
 xen/include/asm-ia64/domain.h                        |   14 
 xen/include/asm-ia64/grant_table.h                   |    2 
 xen/include/asm-ia64/linux-xen/asm/processor.h       |    8 
 xen/include/asm-ia64/linux-xen/linux/efi.h           |    4 
 xen/include/asm-ia64/multicall.h                     |    8 
 xen/include/asm-ia64/perfc.h                         |   12 
 xen/include/asm-ia64/perfc_defn.h                    |   17 
 xen/include/asm-ia64/privop_stat.h                   |   43 
 xen/include/asm-ia64/uaccess.h                       |   18 
 xen/include/asm-ia64/vcpu.h                          |   17 
 xen/include/asm-ia64/vhpt.h                          |    2 
 xen/include/asm-ia64/vmx.h                           |    8 
 xen/include/asm-ia64/vmx_vcpu.h                      |    7 
 xen/include/asm-ia64/vmx_vpd.h                       |   10 
 xen/include/public/domctl.h                          |    7 
 xen/include/public/xen.h                             |    1 
 76 files changed, 2182 insertions(+), 1683 deletions(-)

diff -r e01441c9a607 -r 50aea0ec406b linux-2.6-xen-sparse/arch/ia64/dig/setup.c
--- a/linux-2.6-xen-sparse/arch/ia64/dig/setup.c        Wed Aug 30 14:09:31 
2006 -0500
+++ b/linux-2.6-xen-sparse/arch/ia64/dig/setup.c        Wed Aug 30 22:36:18 
2006 +0100
@@ -69,7 +69,7 @@ dig_setup (char **cmdline_p)
        screen_info.orig_video_isVGA = 1;       /* XXX fake */
        screen_info.orig_video_ega_bx = 3;      /* XXX fake */
 #ifdef CONFIG_XEN
-       if (!is_running_on_xen())
+       if (!is_running_on_xen() || !is_initial_xendomain())
                return;
 
        if (xen_start_info->console.dom0.info_size >=
diff -r e01441c9a607 -r 50aea0ec406b 
linux-2.6-xen-sparse/arch/ia64/kernel/setup.c
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c     Wed Aug 30 14:09:31 
2006 -0500
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c     Wed Aug 30 22:36:18 
2006 +0100
@@ -560,7 +560,9 @@ setup_arch (char **cmdline_p)
 
        platform_setup(cmdline_p);
        paging_init();
+#ifdef CONFIG_XEN
        contiguous_bitmap_init(max_pfn);
+#endif
 }
 
 /*
diff -r e01441c9a607 -r 50aea0ec406b 
linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
--- a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c   Wed Aug 30 14:09:31 
2006 -0500
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c   Wed Aug 30 22:36:18 
2006 +0100
@@ -371,6 +371,8 @@ int
 int
 HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
 {
+       __u64 va1, va2, pa1, pa2;
+
        if (cmd == GNTTABOP_map_grant_ref) {
                unsigned int i;
                for (i = 0; i < count; i++) {
@@ -378,8 +380,29 @@ HYPERVISOR_grant_table_op(unsigned int c
                                (struct gnttab_map_grant_ref*)uop + i);
                }
        }
-
-       return ____HYPERVISOR_grant_table_op(cmd, uop, count);
+       va1 = (__u64)uop & PAGE_MASK;
+       pa1 = pa2 = 0;
+       if ((REGION_NUMBER(va1) == 5) &&
+           ((va1 - KERNEL_START) >= KERNEL_TR_PAGE_SIZE)) {
+               pa1 = ia64_tpa(va1);
+               if (cmd <= GNTTABOP_transfer) {
+                       static uint32_t uop_size[GNTTABOP_transfer + 1] = {
+                               sizeof(struct gnttab_map_grant_ref),
+                               sizeof(struct gnttab_unmap_grant_ref),
+                               sizeof(struct gnttab_setup_table),
+                               sizeof(struct gnttab_dump_table),
+                               sizeof(struct gnttab_transfer),
+                       };
+                       va2 = (__u64)uop + (uop_size[cmd] * count) - 1;
+                       va2 &= PAGE_MASK;
+                       if (va1 != va2) {
+                               /* maximum size of uop is 2pages */
+                               BUG_ON(va2 > va1 + PAGE_SIZE);
+                               pa2 = ia64_tpa(va2);
+                       }
+               }
+       }
+       return ____HYPERVISOR_grant_table_op(cmd, uop, count, pa1, pa2);
 }
 EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
 
diff -r e01441c9a607 -r 50aea0ec406b 
linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c
--- a/linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c       Wed Aug 30 
14:09:31 2006 -0500
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c       Wed Aug 30 
22:36:18 2006 +0100
@@ -846,7 +846,7 @@ void __init setup_arch(char **cmdline_p)
 
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        /* Make sure we have a large enough P->M table. */
-                       phys_to_machine_mapping = alloc_bootmem(
+                       phys_to_machine_mapping = alloc_bootmem_pages(
                                end_pfn * sizeof(unsigned long));
                        memset(phys_to_machine_mapping, ~0,
                               end_pfn * sizeof(unsigned long));
@@ -863,7 +863,7 @@ void __init setup_arch(char **cmdline_p)
                         * list of frames that make up the p2m table. Used by
                          * save/restore.
                         */
-                       pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
+                       pfn_to_mfn_frame_list_list = 
alloc_bootmem_pages(PAGE_SIZE);
                        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list 
=
                                virt_to_mfn(pfn_to_mfn_frame_list_list);
 
@@ -873,7 +873,7 @@ void __init setup_arch(char **cmdline_p)
                                        k++;
                                        BUG_ON(k>=fpp);
                                        pfn_to_mfn_frame_list[k] =
-                                               alloc_bootmem(PAGE_SIZE);
+                                               alloc_bootmem_pages(PAGE_SIZE);
                                        pfn_to_mfn_frame_list_list[k] =
                                                
virt_to_mfn(pfn_to_mfn_frame_list[k]);
                                        j=0;
diff -r e01441c9a607 -r 50aea0ec406b 
linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Wed Aug 30 
14:09:31 2006 -0500
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Wed Aug 30 
22:36:18 2006 +0100
@@ -193,6 +193,7 @@ static void netfront_closing(struct xenb
 
 static void end_access(int, void *);
 static void netif_disconnect_backend(struct netfront_info *);
+static int open_netdev(struct netfront_info *);
 static void close_netdev(struct netfront_info *);
 static void netif_free(struct netfront_info *);
 
@@ -263,15 +264,22 @@ static int __devinit netfront_probe(stru
        dev->dev.driver_data = info;
 
        err = talk_to_backend(dev, info);
-       if (err) {
-               xennet_sysfs_delif(info->netdev);
-               unregister_netdev(netdev);
-               free_netdev(netdev);
-               dev->dev.driver_data = NULL;
-               return err;
-       }
+       if (err)
+               goto fail_backend;
+
+       err = open_netdev(info);
+       if (err)
+               goto fail_open;
 
        return 0;
+
+ fail_open:
+       xennet_sysfs_delif(info->netdev);
+       unregister_netdev(netdev);
+ fail_backend:
+       free_netdev(netdev);
+       dev->dev.driver_data = NULL;
+       return err;
 }
 
 
@@ -1887,27 +1895,9 @@ create_netdev(int handle, int copying_re
        SET_MODULE_OWNER(netdev);
        SET_NETDEV_DEV(netdev, &dev->dev);
 
-       err = register_netdev(netdev);
-       if (err) {
-               printk(KERN_WARNING "%s> register_netdev err=%d\n",
-                      __FUNCTION__, err);
-               goto exit_free_rx;
-       }
-
-       err = xennet_sysfs_addif(netdev);
-       if (err) {
-               /* This can be non-fatal: it only means no tuning parameters */
-               printk(KERN_WARNING "%s> add sysfs failed err=%d\n",
-                      __FUNCTION__, err);
-       }
-
        np->netdev = netdev;
-
        return netdev;
 
-
- exit_free_rx:
-       gnttab_free_grant_references(np->gref_rx_head);
  exit_free_tx:
        gnttab_free_grant_references(np->gref_tx_head);
  exit:
@@ -1966,6 +1956,26 @@ static int __devexit netfront_remove(str
        return 0;
 }
 
+
+static int open_netdev(struct netfront_info *info)
+{
+       int err;
+       
+       err = register_netdev(info->netdev);
+       if (err) {
+               printk(KERN_WARNING "%s: register_netdev err=%d\n",
+                      __FUNCTION__, err);
+               return err;
+       }
+
+       err = xennet_sysfs_addif(info->netdev);
+       if (err) {
+               /* This can be non-fatal: it only means no tuning parameters */
+               printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
+                      __FUNCTION__, err);
+       }
+       return 0;
+}
 
 static void close_netdev(struct netfront_info *info)
 {
diff -r e01441c9a607 -r 50aea0ec406b linux-2.6-xen-sparse/include/asm-ia64/agp.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/agp.h       Wed Aug 30 14:09:31 
2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-ia64/agp.h       Wed Aug 30 22:36:18 
2006 +0100
@@ -19,10 +19,21 @@
 #define flush_agp_cache()              mb()
 
 /* Convert a physical address to an address suitable for the GART. */
+#ifndef CONFIG_XEN
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+#else
 #define phys_to_gart(x) phys_to_machine_for_dma(x)
 #define gart_to_phys(x) machine_to_phys_for_dma(x)
+#endif
 
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#ifndef CONFIG_XEN
+#define alloc_gatt_pages(order)                \
+       ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order)  \
+       free_pages((unsigned long)(table), (order))
+#else
 #include <asm/hypervisor.h>
 static inline char*
 alloc_gatt_pages(unsigned int order)
@@ -46,5 +57,6 @@ free_gatt_pages(void* table, unsigned in
        xen_destroy_contiguous_region((unsigned long)table, order);
        free_pages((unsigned long)table, order);
 }
+#endif /* CONFIG_XEN */
 
 #endif /* _ASM_IA64_AGP_H */
diff -r e01441c9a607 -r 50aea0ec406b 
linux-2.6-xen-sparse/include/asm-ia64/dma-mapping.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/dma-mapping.h       Wed Aug 30 
14:09:31 2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-ia64/dma-mapping.h       Wed Aug 30 
22:36:18 2006 +0100
@@ -7,11 +7,28 @@
  */
 #include <linux/config.h>
 #include <asm/machvec.h>
+#ifdef CONFIG_XEN
 /* Needed for arch/i386/kernel/swiotlb.c and arch/i386/kernel/pci-dma-xen.c */
 #include <asm/hypervisor.h>
 /* Needed for arch/i386/kernel/swiotlb.c */
 #include <asm-i386/mach-xen/asm/swiotlb.h>
+#endif
 
+#ifndef CONFIG_XEN
+#define dma_alloc_coherent     platform_dma_alloc_coherent
+#define dma_alloc_noncoherent  platform_dma_alloc_coherent     /* coherent 
mem. is cheap */
+#define dma_free_coherent      platform_dma_free_coherent
+#define dma_free_noncoherent   platform_dma_free_coherent
+#define dma_map_single         platform_dma_map_single
+#define dma_map_sg             platform_dma_map_sg
+#define dma_unmap_single       platform_dma_unmap_single
+#define dma_unmap_sg           platform_dma_unmap_sg
+#define dma_sync_single_for_cpu        platform_dma_sync_single_for_cpu
+#define dma_sync_sg_for_cpu    platform_dma_sync_sg_for_cpu
+#define dma_sync_single_for_device platform_dma_sync_single_for_device
+#define dma_sync_sg_for_device platform_dma_sync_sg_for_device
+#define dma_mapping_error      platform_dma_mapping_error
+#else
 int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
                enum dma_data_direction direction);
 void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
@@ -50,6 +67,7 @@ dma_sync_sg_for_device(struct device *de
                swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
        flush_write_buffers();
 }
+#endif
 
 #define dma_map_page(dev, pg, off, size, dir)                          \
        dma_map_single(dev, page_address(pg) + (off), (size), (dir))
@@ -91,6 +109,7 @@ dma_cache_sync (void *vaddr, size_t size
 
 #define dma_is_consistent(dma_handle)  (1)     /* all we do is coherent 
memory... */
 
+#ifdef CONFIG_XEN
 /* arch/i386/kernel/swiotlb.o requires */
 void contiguous_bitmap_init(unsigned long end_pfn);
 
@@ -111,5 +130,6 @@ range_straddles_page_boundary(void *p, s
        return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
                !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
 }
+#endif
 
 #endif /* _ASM_IA64_DMA_MAPPING_H */
diff -r e01441c9a607 -r 50aea0ec406b 
linux-2.6-xen-sparse/include/asm-ia64/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h Wed Aug 30 14:09:31 
2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h Wed Aug 30 22:36:18 
2006 +0100
@@ -51,7 +51,7 @@
                              "break 0x1000 ;;\n"               \
                              "mov %0=r8 ;;\n"                  \
                              : "=r" (__res)                    \
-                             : "i" (__HYPERVISOR_##name)       \
+                             : "J" (__HYPERVISOR_##name)       \
                              : "r2","r8",                      \
                                "memory" );                     \
        (type)__res;                                            \
@@ -66,8 +66,8 @@
                              "break 0x1000 ;;\n"               \
                              "mov %0=r8 ;;\n"                  \
                              : "=r" (__res)                    \
-                             : "i" (__HYPERVISOR_##name),      \
-                               "r" ((unsigned long)(a1))       \
+                             : "J" (__HYPERVISOR_##name),      \
+                               "rI" ((unsigned long)(a1))      \
                              : "r14","r2","r8",                \
                                "memory" );                     \
        (type)__res;                                            \
@@ -83,9 +83,9 @@
                              "break 0x1000 ;;\n"               \
                              "mov %0=r8 ;;\n"                  \
                              : "=r" (__res)                    \
-                             : "i" (__HYPERVISOR_##name),      \
-                               "r" ((unsigned long)(a1)),      \
-                               "r" ((unsigned long)(a2))       \
+                             : "J" (__HYPERVISOR_##name),      \
+                               "rI" ((unsigned long)(a1)),     \
+                               "rI" ((unsigned long)(a2))      \
                              : "r14","r15","r2","r8",          \
                                "memory" );                     \
        (type)__res;                                            \
@@ -102,10 +102,10 @@
                              "break 0x1000 ;;\n"               \
                              "mov %0=r8 ;;\n"                  \
                              : "=r" (__res)                    \
-                             : "i" (__HYPERVISOR_##name),      \
-                               "r" ((unsigned long)(a1)),      \
-                               "r" ((unsigned long)(a2)),      \
-                               "r" ((unsigned long)(a3))       \
+                             : "J" (__HYPERVISOR_##name),      \
+                               "rI" ((unsigned long)(a1)),     \
+                               "rI" ((unsigned long)(a2)),     \
+                               "rI" ((unsigned long)(a3))      \
                              : "r14","r15","r16","r2","r8",    \
                                "memory" );                     \
        (type)__res;                                            \
@@ -123,11 +123,11 @@
                              "break 0x1000 ;;\n"               \
                              "mov %0=r8 ;;\n"                  \
                              : "=r" (__res)                    \
-                             : "i" (__HYPERVISOR_##name),      \
-                               "r" ((unsigned long)(a1)),      \
-                               "r" ((unsigned long)(a2)),      \
-                               "r" ((unsigned long)(a3)),      \
-                               "r" ((unsigned long)(a4))       \
+                             : "J" (__HYPERVISOR_##name),      \
+                               "rI" ((unsigned long)(a1)),     \
+                               "rI" ((unsigned long)(a2)),     \
+                               "rI" ((unsigned long)(a3)),     \
+                               "rI" ((unsigned long)(a4))      \
                              : "r14","r15","r16","r2","r8",    \
                                "r17","memory" );               \
        (type)__res;                                            \
@@ -146,12 +146,12 @@
                              "break 0x1000 ;;\n"               \
                              "mov %0=r8 ;;\n"                  \
                              : "=r" (__res)                    \
-                             : "i" (__HYPERVISOR_##name),      \
-                               "r" ((unsigned long)(a1)),      \
-                               "r" ((unsigned long)(a2)),      \
-                               "r" ((unsigned long)(a3)),      \
-                               "r" ((unsigned long)(a4)),      \
-                               "r" ((unsigned long)(a5))       \
+                             : "J" (__HYPERVISOR_##name),      \
+                               "rI" ((unsigned long)(a1)),     \
+                               "rI" ((unsigned long)(a2)),     \
+                               "rI" ((unsigned long)(a3)),     \
+                               "rI" ((unsigned long)(a4)),     \
+                               "rI" ((unsigned long)(a5))      \
                              : "r14","r15","r16","r2","r8",    \
                                "r17","r18","memory" );         \
        (type)__res;                                            \
@@ -275,9 +275,10 @@ HYPERVISOR_physdev_op(
 //XXX __HYPERVISOR_grant_table_op is used for this hypercall constant.
 static inline int
 ____HYPERVISOR_grant_table_op(
-    unsigned int cmd, void *uop, unsigned int count)
-{
-    return _hypercall3(int, grant_table_op, cmd, uop, count);
+    unsigned int cmd, void *uop, unsigned int count,
+    unsigned long pa1, unsigned long pa2)
+{
+    return _hypercall5(int, grant_table_op, cmd, uop, count, pa1, pa2);
 }
 
 int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
@@ -290,6 +291,13 @@ HYPERVISOR_vcpu_op(
 }
 
 extern int HYPERVISOR_suspend(unsigned long srec);
+
+static inline unsigned long
+HYPERVISOR_hvm_op(
+       int cmd, void *arg)
+{
+       return _hypercall2(unsigned long, hvm_op, cmd, arg);
+}
 
 static inline int
 HYPERVISOR_callback_op(
@@ -307,99 +315,15 @@ static inline void exit_idle(void) {}
 })
 
 #include <linux/err.h>
+#ifdef CONFIG_XEN
 #include <asm/xen/privop.h>
-
-#define _hypercall_imm1(type, name, imm, a1)                   \
-({                                                             \
-       long __res;                                             \
-       __asm__ __volatile__ (";;\n"                            \
-                             "mov r14=%2\n"                    \
-                             "mov r15=%3\n"                    \
-                             "mov r2=%1\n"                     \
-                             "break 0x1000 ;;\n"               \
-                             "mov %0=r8 ;;\n"                  \
-                             : "=r" (__res)                    \
-                             : "i" (__HYPERVISOR_##name),      \
-                               "i" (imm),                      \
-                               "r" ((unsigned long)(a1))       \
-                             : "r14","r15","r2","r8",          \
-                               "memory" );                     \
-       (type)__res;                                            \
-})
-
-#define _hypercall_imm2(type, name, imm, a1, a2)               \
-({                                                             \
-       long __res;                                             \
-       __asm__ __volatile__ (";;\n"                            \
-                             "mov r14=%2\n"                    \
-                             "mov r15=%3\n"                    \
-                             "mov r16=%4\n"                    \
-                             "mov r2=%1\n"                     \
-                             "break 0x1000 ;;\n"               \
-                             "mov %0=r8 ;;\n"                  \
-                             : "=r" (__res)                    \
-                             : "i" (__HYPERVISOR_##name),      \
-                               "i" (imm),                      \
-                               "r" ((unsigned long)(a1)),      \
-                               "r" ((unsigned long)(a2))       \
-                             : "r14","r15","r16","r2","r8",    \
-                               "memory" );                     \
-       (type)__res;                                            \
-})
-
-#define _hypercall_imm3(type, name, imm, a1, a2, a3)           \
-({                                                             \
-       long __res;                                             \
-       __asm__ __volatile__ (";;\n"                            \
-                             "mov r14=%2\n"                    \
-                             "mov r15=%3\n"                    \
-                             "mov r16=%4\n"                    \
-                             "mov r17=%5\n"                    \
-                             "mov r2=%1\n"                     \
-                             "break 0x1000 ;;\n"               \
-                             "mov %0=r8 ;;\n"                  \
-                             : "=r" (__res)                    \
-                             : "i" (__HYPERVISOR_##name),      \
-                               "i" (imm),                      \
-                               "r" ((unsigned long)(a1)),      \
-                               "r" ((unsigned long)(a2)),      \
-                               "r" ((unsigned long)(a3))       \
-                             : "r14","r15","r16","r17",        \
-                               "r2","r8",                      \
-                               "memory" );                     \
-       (type)__res;                                            \
-})
-
-#define _hypercall_imm4(type, name, imm, a1, a2, a3, a4)       \
-({                                                             \
-       long __res;                                             \
-       __asm__ __volatile__ (";;\n"                            \
-                             "mov r14=%2\n"                    \
-                             "mov r15=%3\n"                    \
-                             "mov r16=%4\n"                    \
-                             "mov r17=%5\n"                    \
-                             "mov r18=%6\n"                    \
-                             "mov r2=%1\n"                     \
-                             "break 0x1000 ;;\n"               \
-                             "mov %0=r8 ;;\n"                  \
-                             : "=r" (__res)                    \
-                             : "i" (__HYPERVISOR_##name),      \
-                               "i" (imm),                      \
-                               "r" ((unsigned long)(a1)),      \
-                               "r" ((unsigned long)(a2)),      \
-                               "r" ((unsigned long)(a3)),      \
-                               "r" ((unsigned long)(a4))       \
-                             : "r14","r15","r16","r17","r18",  \
-                               "r2","r8",                      \
-                               "memory" );                     \
-       (type)__res;                                            \
-})
+#endif /* CONFIG_XEN */
 
 static inline unsigned long
 __HYPERVISOR_ioremap(unsigned long ioaddr, unsigned long size)
 {
-       return _hypercall_imm2(unsigned long, ia64_dom0vp_op,
-                              IA64_DOM0VP_ioremap, ioaddr, size);
+       return _hypercall3(unsigned long, ia64_dom0vp_op,
+                          IA64_DOM0VP_ioremap, ioaddr, size);
 }
 
 static inline unsigned long
@@ -421,8 +345,8 @@ static inline unsigned long
 static inline unsigned long
 __HYPERVISOR_phystomach(unsigned long gpfn)
 {
-       return _hypercall_imm1(unsigned long, ia64_dom0vp_op,
-                              IA64_DOM0VP_phystomach, gpfn);
+       return _hypercall2(unsigned long, ia64_dom0vp_op,
+                          IA64_DOM0VP_phystomach, gpfn);
 }
 
 static inline unsigned long
@@ -438,8 +362,8 @@ static inline unsigned long
 static inline unsigned long
 __HYPERVISOR_machtophys(unsigned long mfn)
 {
-       return _hypercall_imm1(unsigned long, ia64_dom0vp_op,
-                              IA64_DOM0VP_machtophys, mfn);
+       return _hypercall2(unsigned long, ia64_dom0vp_op,
+                          IA64_DOM0VP_machtophys, mfn);
 }
 
 static inline unsigned long
@@ -455,8 +379,8 @@ static inline unsigned long
 static inline unsigned long
 __HYPERVISOR_zap_physmap(unsigned long gpfn, unsigned int extent_order)
 {
-       return _hypercall_imm2(unsigned long, ia64_dom0vp_op,
-                              IA64_DOM0VP_zap_physmap, gpfn, extent_order);
+       return _hypercall3(unsigned long, ia64_dom0vp_op,
+                          IA64_DOM0VP_zap_physmap, gpfn, extent_order);
 }
 
 static inline unsigned long
@@ -473,9 +397,8 @@ __HYPERVISOR_add_physmap(unsigned long g
 __HYPERVISOR_add_physmap(unsigned long gpfn, unsigned long mfn,
                         unsigned long flags, domid_t domid)
 {
-       return _hypercall_imm4(unsigned long, ia64_dom0vp_op,
-                              IA64_DOM0VP_add_physmap, gpfn, mfn, flags,
-                              domid);
+       return _hypercall5(unsigned long, ia64_dom0vp_op,
+                          IA64_DOM0VP_add_physmap, gpfn, mfn, flags, domid);
 }
 
 static inline unsigned long
diff -r e01441c9a607 -r 50aea0ec406b 
linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h        Wed Aug 30 
14:09:31 2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h        Wed Aug 30 
22:36:18 2006 +0100
@@ -33,7 +33,7 @@
 #ifndef __HYPERVISOR_H__
 #define __HYPERVISOR_H__
 
-#ifndef CONFIG_XEN
+#if !defined(CONFIG_XEN) && !defined(CONFIG_VMX_GUEST)
 #define is_running_on_xen()                    (0)
 #define HYPERVISOR_ioremap(offset, size)       (offset)
 #else
@@ -41,7 +41,7 @@ extern int running_on_xen;
 #define is_running_on_xen()                    (running_on_xen)
 #endif
 
-#ifdef CONFIG_XEN
+#if defined(CONFIG_XEN) || defined(CONFIG_VMX_GUEST)
 #include <linux/config.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -59,10 +59,9 @@ extern shared_info_t *HYPERVISOR_shared_
 extern shared_info_t *HYPERVISOR_shared_info;
 extern start_info_t *xen_start_info;
 
-#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
-
 void force_evtchn_callback(void);
 
+#ifndef CONFIG_VMX_GUEST
 /* Turn jiffies into Xen system time. XXX Implement me. */
 #define jiffies_to_st(j)       0
 
@@ -145,10 +144,14 @@ int privcmd_mmap(struct file * file, str
 #define scrub_pages(_p,_n) ((void)0)
 #endif
 #define        pte_mfn(_x)     pte_pfn(_x)
-#define __pte_ma(_x)   ((pte_t) {(_x)})
 #define phys_to_machine_mapping_valid(_x)      (1)
-#define pfn_pte_ma(_x,_y)      __pte_ma(0)
-
+
+#endif /* !CONFIG_VMX_GUEST */
+
+#define __pte_ma(_x)   ((pte_t) {(_x)})        /* unmodified use */
+#define pfn_pte_ma(_x,_y)      __pte_ma(0)     /* unmodified use */
+
+#ifndef CONFIG_VMX_GUEST
 int __xen_create_contiguous_region(unsigned long vstart, unsigned int order, 
unsigned int address_bits);
 static inline int
 xen_create_contiguous_region(unsigned long vstart,
@@ -170,6 +173,8 @@ xen_destroy_contiguous_region(unsigned l
                __xen_destroy_contiguous_region(vstart, order);
 }
 
+#endif /* !CONFIG_VMX_GUEST */
+
 // for netfront.c, netback.c
 #define MULTI_UVMFLAGS_INDEX 0 //XXX any value
 
@@ -180,12 +185,29 @@ MULTI_update_va_mapping(
 {
        mcl->op = __HYPERVISOR_update_va_mapping;
        mcl->result = 0;
+}
+
+static inline void
+MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
+       void *uop, unsigned int count)
+{
+       mcl->op = __HYPERVISOR_grant_table_op;
+       mcl->args[0] = cmd;
+       mcl->args[1] = (unsigned long)uop;
+       mcl->args[2] = count;
 }
 
 // for debug
 asmlinkage int xprintk(const char *fmt, ...);
 #define xprintd(fmt, ...)      xprintk("%s:%d " fmt, __func__, __LINE__, \
                                        ##__VA_ARGS__)
-#endif /* CONFIG_XEN */
+
+#endif /* CONFIG_XEN || CONFIG_VMX_GUEST */
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
+#else
+#define is_initial_xendomain() 0
+#endif
 
 #endif /* __HYPERVISOR_H__ */
diff -r e01441c9a607 -r 50aea0ec406b linux-2.6-xen-sparse/include/asm-ia64/io.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/io.h        Wed Aug 30 14:09:31 
2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-ia64/io.h        Wed Aug 30 22:36:18 
2006 +0100
@@ -97,6 +97,11 @@ extern int valid_mmap_phys_addr_range (u
  * The following two macros are deprecated and scheduled for removal.
  * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
  */
+#ifndef CONFIG_XEN
+#define bus_to_virt    phys_to_virt
+#define virt_to_bus    virt_to_phys
+#define page_to_bus    page_to_phys
+#else
 #define bus_to_virt(bus)       \
        phys_to_virt(machine_to_phys_for_dma(bus))
 #define virt_to_bus(virt)      \
@@ -124,6 +129,7 @@ extern int valid_mmap_phys_addr_range (u
        (((bvec_to_bus((vec1)) + (vec1)->bv_len) == bvec_to_bus((vec2))) && \
         ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) ==              \
          bvec_to_pseudophys((vec2))))
+#endif /* CONFIG_XEN */
 
 # endif /* KERNEL */
 
diff -r e01441c9a607 -r 50aea0ec406b 
linux-2.6-xen-sparse/include/asm-ia64/machvec.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/machvec.h   Wed Aug 30 14:09:31 
2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-ia64/machvec.h   Wed Aug 30 22:36:18 
2006 +0100
@@ -247,6 +247,7 @@ extern void machvec_init (const char *na
 #  error Unknown configuration.  Update asm-ia64/machvec.h.
 # endif /* CONFIG_IA64_GENERIC */
 
+#ifdef CONFIG_XEN
 # define platform_dma_map_sg           dma_map_sg
 # define platform_dma_unmap_sg         dma_unmap_sg
 # define platform_dma_mapping_error    dma_mapping_error
@@ -259,6 +260,7 @@ extern void machvec_init (const char *na
                                        dma_sync_single_for_cpu
 # define platform_dma_sync_single_for_device \
                                        dma_sync_single_for_device
+#endif
 
 /*
  * Declare default routines which aren't declared anywhere else:
diff -r e01441c9a607 -r 50aea0ec406b 
linux-2.6-xen-sparse/include/asm-ia64/page.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/page.h      Wed Aug 30 14:09:31 
2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-ia64/page.h      Wed Aug 30 22:36:18 
2006 +0100
@@ -117,6 +117,9 @@ extern unsigned long max_low_pfn;
 # define pfn_to_page(pfn)      (vmem_map + (pfn))
 #endif
 
+#ifndef CONFIG_XEN
+#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
+#endif
 #define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 
@@ -221,12 +224,9 @@ get_order (unsigned long size)
 #ifndef __ASSEMBLY__
 #ifdef CONFIG_XEN
 
-#define INVALID_P2M_ENTRY      (~0UL)
-
 #include <linux/kernel.h>
 #include <asm/hypervisor.h>
 #include <xen/features.h>      // to compile netback, netfront
-typedef unsigned long maddr_t; // to compile netback, netfront
 
 /*
  * XXX hack!
@@ -265,68 +265,7 @@ extern struct address_space xen_ia64_for
 })
 #define HAVE_ARCH_FREE_PAGE
 
-/* XXX xen page size != page size */
-
-static inline unsigned long
-pfn_to_mfn_for_dma(unsigned long pfn)
-{
-       unsigned long mfn;
-       mfn = HYPERVISOR_phystomach(pfn);
-       BUG_ON(mfn == 0); // XXX
-       BUG_ON(mfn == INVALID_P2M_ENTRY); // XXX
-       BUG_ON(mfn == INVALID_MFN);
-       return mfn;
-}
-
-static inline unsigned long
-phys_to_machine_for_dma(unsigned long phys)
-{
-       unsigned long machine =
-                     pfn_to_mfn_for_dma(phys >> PAGE_SHIFT) << PAGE_SHIFT;
-       machine |= (phys & ~PAGE_MASK);
-       return machine;
-}
-
-static inline unsigned long
-mfn_to_pfn_for_dma(unsigned long mfn)
-{
-       unsigned long pfn;
-       pfn = HYPERVISOR_machtophys(mfn);
-       BUG_ON(pfn == 0);
-       //BUG_ON(pfn == INVALID_M2P_ENTRY);
-       return pfn;
-}
-
-static inline unsigned long
-machine_to_phys_for_dma(unsigned long machine)
-{
-       unsigned long phys =
-                     mfn_to_pfn_for_dma(machine >> PAGE_SHIFT) << PAGE_SHIFT;
-       phys |= (machine & ~PAGE_MASK);
-       return phys;
-}
-
-#define set_phys_to_machine(pfn, mfn) do { } while (0)
-#define xen_machphys_update(mfn, pfn) do { } while (0)
-
-/* XXX to compile set_phys_to_machine(vaddr, FOREIGN_FRAME(m)) */
-#define FOREIGN_FRAME(m)        (INVALID_P2M_ENTRY)
-
-#define mfn_to_pfn(mfn)                        (mfn)
-#define mfn_to_virt(mfn)               (__va((mfn) << PAGE_SHIFT))
-#define pfn_to_mfn(pfn)                        (pfn)
-#define virt_to_mfn(virt)              (__pa(virt) >> PAGE_SHIFT)
-#define virt_to_machine(virt)          __pa(virt) // for tpmfront.c
-
-static inline unsigned long
-mfn_to_local_pfn(unsigned long mfn)
-{
-       extern unsigned long max_mapnr;
-       unsigned long pfn = mfn_to_pfn_for_dma(mfn);
-       if (!pfn_valid(pfn))
-               return INVALID_P2M_ENTRY;
-       return pfn;
-}
+#include <asm/maddr.h>
 
 #endif /* CONFIG_XEN */
 #endif /* __ASSEMBLY__ */
diff -r e01441c9a607 -r 50aea0ec406b 
linux-2.6-xen-sparse/include/asm-ia64/pgalloc.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/pgalloc.h   Wed Aug 30 14:09:31 
2006 -0500
+++ b/linux-2.6-xen-sparse/include/asm-ia64/pgalloc.h   Wed Aug 30 22:36:18 
2006 +0100
@@ -126,7 +126,11 @@ static inline void
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
 {
+#ifndef CONFIG_XEN
+       pmd_val(*pmd_entry) = page_to_phys(pte);
+#else
        pmd_val(*pmd_entry) = page_to_pseudophys(pte);
+#endif
 }
 
 static inline void
diff -r e01441c9a607 -r 50aea0ec406b tools/debugger/gdb/gdbbuild
--- a/tools/debugger/gdb/gdbbuild       Wed Aug 30 14:09:31 2006 -0500
+++ b/tools/debugger/gdb/gdbbuild       Wed Aug 30 22:36:18 2006 +0100
@@ -18,7 +18,7 @@ if [ "$MAKE" ]; then
 if [ "$MAKE" ]; then
     $MAKE
 elif which gmake ; then
-    gmake -j4
+    gmake -j4 CFLAGS=-D__XEN_TOOLS__
 else
-    make -j4
+    make -j4 CFLAGS=-D__XEN_TOOLS__
 fi
diff -r e01441c9a607 -r 50aea0ec406b tools/examples/xmexample.vti
--- a/tools/examples/xmexample.vti      Wed Aug 30 14:09:31 2006 -0500
+++ b/tools/examples/xmexample.vti      Wed Aug 30 22:36:18 2006 +0100
@@ -37,11 +37,9 @@ name = "ExampleVTIDomain"
 
 # Optionally define mac and/or bridge for the network interfaces.
 # Random MACs are assigned if not given.
-#vif = [ 'type=ioemu, mac=00:16:3e:00:00:11, bridge=xenbr0' ]
+#vif = [ 'type=ioemu, mac=00:16:3e:00:00:11, bridge=xenbr0, model=ne2k_pci' ]
 # type=ioemu specify the NIC is an ioemu device not netfront
 vif = [ 'type=ioemu, bridge=xenbr0' ]
-# for multiple NICs in device model, 3 in this example
-#vif = [ 'type=ioemu, bridge=xenbr0', 'type=ioemu', 'type=ioemu']
 
 #----------------------------------------------------------------------------
 # Define the disk devices you want the domain to have access to, and
@@ -51,7 +49,7 @@ vif = [ 'type=ioemu, bridge=xenbr0' ]
 # and MODE is r for read-only, w for read-write.
 
 #disk = [ 'phy:hda1,hda1,r' ]
-disk = [ 'file:/var/images/xenia64.img,ioemu:hda,w' ]
+disk = [ 'file:/var/images/xenia64.img,hda,w', ',hdc:cdrom,r' ]
 
 #----------------------------------------------------------------------------
 # Set according to whether you want the domain restarted when it exits.
@@ -65,13 +63,6 @@ disk = [ 'file:/var/images/xenia64.img,i
 
 # New stuff
 device_model = '/usr/' + arch_libdir + '/xen/bin/qemu-dm'
-
-# Advanced users only. Don't touch if you don't know what you're doing
-memmap = '/usr/lib/xen/boot/mem-map.sxp'
-
-#-----------------------------------------------------------------------------
-# Disk image for 
-#cdrom=
 
 #-----------------------------------------------------------------------------
 # boot on floppy (a), hard disk (c) or CD-ROM (d) 
@@ -91,8 +82,17 @@ vnc=0
 vnc=0
 
 #----------------------------------------------------------------------------
-# enable spawning vncviewer(only valid when vnc=1), default = 1
-vncviewer=0
+# set VNC display number, default = domid
+#vncdisplay=1
+
+#----------------------------------------------------------------------------
+# try to find an unused port for the VNC server, default = 1
+#vncunused=1
+
+#----------------------------------------------------------------------------
+# enable spawning vncviewer for domain's console
+# (only valid when vnc=1), default = 0
+#vncconsole=0
 
 #----------------------------------------------------------------------------
 # no graphics, use serial port
@@ -108,14 +108,12 @@ serial='pty'
 serial='pty'
 
 #-----------------------------------------------------------------------------
-#   enable audio support
-#enable-audio=1
-
+#   enable sound card support, [sb16|es1370|all|..,..], default none
+#soundhw='sb16'
 
 #-----------------------------------------------------------------------------
 #    set the real time clock to local time [default=0 i.e. set to utc]
 #localtime=1
-
 
 #-----------------------------------------------------------------------------
 #    start in full screen
diff -r e01441c9a607 -r 50aea0ec406b tools/libxc/ia64/xc_ia64_hvm_build.c
--- a/tools/libxc/ia64/xc_ia64_hvm_build.c      Wed Aug 30 14:09:31 2006 -0500
+++ b/tools/libxc/ia64/xc_ia64_hvm_build.c      Wed Aug 30 22:36:18 2006 +0100
@@ -553,7 +553,6 @@ setup_guest(int xc_handle, uint32_t dom,
 {
     unsigned long page_array[2];
     shared_iopage_t *sp;
-    int i;
     unsigned long dom_memsize = (memsize << 20);
     DECLARE_DOMCTL;
 
@@ -604,18 +603,6 @@ setup_guest(int xc_handle, uint32_t dom,
         goto error_out;
 
     memset(sp, 0, PAGE_SIZE);
-
-    for (i = 0; i < vcpus; i++) {
-        uint32_t vp_eport;
-
-        vp_eport = xc_evtchn_alloc_unbound(xc_handle, dom, 0);
-        if (vp_eport < 0) {
-            DPRINTF("Couldn't get unbound port from VMX guest.\n");
-            goto error_out;
-        }
-        sp->vcpu_iodata[i].vp_eport = vp_eport;
-    }
-
     munmap(sp, PAGE_SIZE);
 
     return 0;
diff -r e01441c9a607 -r 50aea0ec406b tools/libxc/ia64/xc_ia64_linux_save.c
--- a/tools/libxc/ia64/xc_ia64_linux_save.c     Wed Aug 30 14:09:31 2006 -0500
+++ b/tools/libxc/ia64/xc_ia64_linux_save.c     Wed Aug 30 22:36:18 2006 +0100
@@ -79,7 +79,7 @@ static int xc_ia64_shadow_control(int xc
     }
 
     return xc_shadow_control(xc_handle, domid, sop,
-                             dirty_bitmap, pages, stats);
+                             dirty_bitmap, pages, NULL, 0, stats);
 }
 
 static inline ssize_t
diff -r e01441c9a607 -r 50aea0ec406b tools/libxc/ia64/xc_ia64_stubs.c
--- a/tools/libxc/ia64/xc_ia64_stubs.c  Wed Aug 30 14:09:31 2006 -0500
+++ b/tools/libxc/ia64/xc_ia64_stubs.c  Wed Aug 30 22:36:18 2006 +0100
@@ -36,7 +36,6 @@ xc_ia64_get_pfn_list(int xc_handle, uint
     struct xen_domctl domctl;
     int num_pfns,ret;
     unsigned int __start_page, __nr_pages;
-    unsigned long max_pfns;
     xen_pfn_t *__pfn_buf;
 
     __start_page = start_page;
@@ -44,27 +43,22 @@ xc_ia64_get_pfn_list(int xc_handle, uint
     __pfn_buf = pfn_buf;
   
     while (__nr_pages) {
-        max_pfns = ((unsigned long)__start_page << 32) | __nr_pages;
         domctl.cmd = XEN_DOMCTL_getmemlist;
-        domctl.domain   = (domid_t)domid;
-        domctl.u.getmemlist.max_pfns = max_pfns;
+        domctl.domain = (domid_t)domid;
+        domctl.u.getmemlist.max_pfns = __nr_pages;
+        domctl.u.getmemlist.start_pfn =__start_page;
         domctl.u.getmemlist.num_pfns = 0;
         set_xen_guest_handle(domctl.u.getmemlist.buffer, __pfn_buf);
 
-        if ((max_pfns != -1UL)
-            && mlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t)) != 0) {
+        if (mlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t)) != 0) {
             PERROR("Could not lock pfn list buffer");
             return -1;
         }
 
         ret = do_domctl(xc_handle, &domctl);
 
-        if (max_pfns != -1UL)
-            (void)munlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t));
+        (void)munlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t));
 
-        if (max_pfns == -1UL)
-            return 0;
-        
         num_pfns = domctl.u.getmemlist.num_pfns;
         __start_page += num_pfns;
         __nr_pages -= num_pfns;
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c       Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/asm-offsets.c       Wed Aug 30 22:36:18 2006 +0100
@@ -31,7 +31,6 @@ void foo(void)
        DEFINE(IA64_SWITCH_STACK_SIZE, sizeof (struct switch_stack));
        DEFINE(IA64_CPU_SIZE, sizeof (struct cpuinfo_ia64));
        DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
-       DEFINE(SHARED_INFO_SIZE, sizeof (struct shared_info));
        DEFINE(MAPPED_REGS_T_SIZE, sizeof (mapped_regs_t));
 
        BLANK();
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/linux-xen/efi.c
--- a/xen/arch/ia64/linux-xen/efi.c     Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/linux-xen/efi.c     Wed Aug 30 22:36:18 2006 +0100
@@ -291,28 +291,6 @@ efi_memmap_walk_uc (efi_freemem_callback
 {
        walk(callback, arg, EFI_MEMORY_UC);
 }
-
-#ifdef XEN
-void
-efi_memmap_walk_type(u32 type, efi_walk_type_callback_t callback, void *arg)
-{
-       void *efi_map_start, *efi_map_end, *p;
-       efi_memory_desc_t *md;
-       u64 efi_desc_size;
-
-       efi_map_start = __va(ia64_boot_param->efi_memmap);
-       efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
-       efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-               md = p;
-               if (md->type == type) {
-                       if ((*callback)(md, arg) < 0)
-                               return;
-               }
-       }
-}
-#endif
 
 /*
  * Look for the PAL_CODE region reported by EFI and maps it using an
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/linux-xen/iosapic.c
--- a/xen/arch/ia64/linux-xen/iosapic.c Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/linux-xen/iosapic.c Wed Aug 30 22:36:18 2006 +0100
@@ -77,6 +77,8 @@
 #include <linux/list.h>
 #ifndef XEN
 #include <linux/pci.h>
+#else
+#include <xen/errno.h>
 #endif
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/linux-xen/irq_ia64.c
--- a/xen/arch/ia64/linux-xen/irq_ia64.c        Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/linux-xen/irq_ia64.c        Wed Aug 30 22:36:18 2006 +0100
@@ -40,6 +40,10 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
 
+#ifdef XEN
+#include <xen/perfc.h>
+#endif
+
 #ifdef CONFIG_PERFMON
 # include <asm/perfmon.h>
 #endif
@@ -108,6 +112,9 @@ ia64_handle_irq (ia64_vector vector, str
 {
        unsigned long saved_tpr;
 
+#ifdef XEN
+       perfc_incrc(irqs);
+#endif
 #if IRQ_DEBUG
 #ifdef XEN
        xen_debug_irq(vector, regs);
@@ -290,8 +297,5 @@ ia64_send_ipi (int cpu, int vector, int 
        ipi_data = (delivery_mode << 8) | (vector & 0xff);
        ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
 
-#ifdef XEN
-       //printf ("send_ipi to %d (%x)\n", cpu, phys_cpu_id);
-#endif
        writeq(ipi_data, ipi_addr);
 }
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/linux-xen/mca.c
--- a/xen/arch/ia64/linux-xen/mca.c     Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/linux-xen/mca.c     Wed Aug 30 22:36:18 2006 +0100
@@ -79,6 +79,7 @@
 
 #ifdef XEN
 #include <xen/symbols.h>
+#include <xen/mm.h>
 #endif
 
 #if defined(IA64_MCA_DEBUG_INFO)
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/linux-xen/process-linux-xen.c
--- a/xen/arch/ia64/linux-xen/process-linux-xen.c       Wed Aug 30 14:09:31 
2006 -0500
+++ b/xen/arch/ia64/linux-xen/process-linux-xen.c       Wed Aug 30 22:36:18 
2006 +0100
@@ -10,6 +10,7 @@
 #include <xen/lib.h>
 #include <xen/symbols.h>
 #include <xen/smp.h>
+#include <xen/sched.h>
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
@@ -166,6 +167,7 @@ show_regs (struct pt_regs *regs)
        printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, 
regs->r27, regs->r28);
        printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, 
regs->r30, regs->r31);
 
+#ifndef XEN
        if (user_mode(regs)) {
                /* print the stacked registers */
                unsigned long val, *bsp, ndirty;
@@ -180,6 +182,7 @@ show_regs (struct pt_regs *regs)
                               ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
                }
        } else
+#endif
                show_stack(NULL, NULL);
 }
 
@@ -807,6 +810,7 @@ get_wchan (struct task_struct *p)
        } while (count++ < 16);
        return 0;
 }
+#endif // !XEN
 
 void
 cpu_halt (void)
@@ -831,6 +835,7 @@ cpu_halt (void)
                ia64_pal_halt(min_power_state);
 }
 
+#ifndef XEN
 void
 machine_restart (char *restart_cmd)
 {
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/linux-xen/smp.c
--- a/xen/arch/ia64/linux-xen/smp.c     Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/linux-xen/smp.c     Wed Aug 30 22:36:18 2006 +0100
@@ -48,6 +48,7 @@
 #include <asm/unistd.h>
 #include <asm/mca.h>
 #ifdef XEN
+#include <xen/errno.h>
 #include <asm/vhpt.h>
 #include <asm/hw_irq.h>
 #endif
@@ -146,6 +147,9 @@ handle_IPI (int irq, void *dev_id, struc
        unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
        unsigned long ops;
 
+#ifdef XEN
+       perfc_incrc(ipis);
+#endif
        mb();   /* Order interrupt and bit testing. */
        while ((ops = xchg(pending_ipis, 0)) != 0) {
                mb();   /* Order bit clearing and data access. */
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/linux-xen/unwind.c
--- a/xen/arch/ia64/linux-xen/unwind.c  Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/linux-xen/unwind.c  Wed Aug 30 22:36:18 2006 +0100
@@ -33,6 +33,7 @@
 #include <xen/sched.h>
 #include <xen/xmalloc.h>
 #include <xen/spinlock.h>
+#include <xen/errno.h>
 
 // work around
 #ifdef CONFIG_SMP
@@ -2315,6 +2316,7 @@ unw_init (void)
                          __start_unwind, __end_unwind);
 }
 
+#ifndef XEN
 /*
  * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
  *
@@ -2353,3 +2355,4 @@ sys_getunwind (void __user *buf, size_t 
                        return -EFAULT;
        return unw.gate_table_size;
 }
+#endif
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/vmx/Makefile
--- a/xen/arch/ia64/vmx/Makefile        Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/vmx/Makefile        Wed Aug 30 22:36:18 2006 +0100
@@ -1,5 +1,5 @@ obj-y += hvm_vioapic.o
 obj-y += hvm_vioapic.o
-obj-y += mm.o
+#obj-y += mm.o
 obj-y += mmio.o
 obj-y += pal_emul.o
 obj-y += vlsapic.o
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c  Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/vmx/mmio.c  Wed Aug 30 22:36:18 2006 +0100
@@ -22,6 +22,7 @@
  */
 
 #include <linux/sched.h>
+#include <xen/mm.h>
 #include <asm/tlb.h>
 #include <asm/vmx_mm_def.h>
 #include <asm/gcc_intrin.h>
@@ -30,7 +31,6 @@
 #include <asm/bundle.h>
 #include <asm/types.h>
 #include <public/hvm/ioreq.h>
-#include <asm/mm.h>
 #include <asm/vmx.h>
 #include <public/event_channel.h>
 #include <public/xen.h>
@@ -155,10 +155,9 @@ static void low_mmio_access(VCPU *vcpu, 
     p->type = 1;
     p->df = 0;
 
-    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
-    p->state = STATE_IOREQ_READY;
-    evtchn_send(iopacket_port(v));
-    vmx_wait_io();
+    p->io_count++;
+
+    vmx_send_assist_req(v);
     if(dir==IOREQ_READ){ //read
         *val=p->u.data;
     }
@@ -187,11 +186,9 @@ static void legacy_io_access(VCPU *vcpu,
     p->type = 0;
     p->df = 0;
 
-    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
-    p->state = STATE_IOREQ_READY;
-    evtchn_send(iopacket_port(v));
-
-    vmx_wait_io();
+    p->io_count++;
+
+    vmx_send_assist_req(v);
     if(dir==IOREQ_READ){ //read
         *val=p->u.data;
     }
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vmmu.c  Wed Aug 30 22:36:18 2006 +0100
@@ -33,6 +33,7 @@
 #include <asm/kregs.h>
 #include <asm/vcpu.h>
 #include <xen/irq.h>
+#include <xen/errno.h>
 
 /*
  * Get the machine page frame number in 16KB unit
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Wed Aug 30 22:36:18 2006 +0100
@@ -59,21 +59,23 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
         }
         else if (IS_PRIV(current->domain)) {
             d = find_domain_by_id(a.domid);
-            if (!d)
+            if (d == NULL)
                 return -ESRCH;
         }
         else
             return -EPERM;
 
         if (op == HVMOP_set_param) {
+            d->arch.hvm_domain.params[a.index] = a.value;
             rc = 0;
-            d->arch.hvm_domain.params[a.index] = a.value;
         }
-        else
-            rc = d->arch.hvm_domain.params[a.index];
+        else {
+            a.value = d->arch.hvm_domain.params[a.index];
+            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+        }
 
         put_domain(d);
-        return rc;
+        break;
     }
 
     default:
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vmx_init.c      Wed Aug 30 22:36:18 2006 +0100
@@ -206,7 +206,7 @@ vmx_create_vp(struct vcpu *v)
        u64 ret;
        vpd_t *vpd = (vpd_t *)v->arch.privregs;
        u64 ivt_base;
-    extern char vmx_ia64_ivt;
+       extern char vmx_ia64_ivt;
        /* ia64_ivt is function pointer, so need this tranlation */
        ivt_base = (u64) &vmx_ia64_ivt;
        printk("ivt_base: 0x%lx\n", ivt_base);
@@ -265,6 +265,29 @@ vmx_load_state(struct vcpu *v)
         * anchored in vcpu */
 }
 
+static void vmx_create_event_channels(struct vcpu *v)
+{
+       vcpu_iodata_t *p;
+       struct vcpu *o;
+
+       if (v->vcpu_id == 0) {
+               /* Ugly: create event channels for every vcpu when vcpu 0
+                  starts, so that they're available for ioemu to bind to. */
+               for_each_vcpu(v->domain, o) {
+                       p = get_vio(v->domain, o->vcpu_id);
+                       o->arch.arch_vmx.xen_port = p->vp_eport =
+                                       alloc_unbound_xen_event_channel(o, 0);
+                       DPRINTK("Allocated port %d for hvm.\n",
+                               o->arch.arch_vmx.xen_port);
+               }
+       }
+}
+
+static void vmx_release_assist_channel(struct vcpu *v)
+{
+       free_xen_event_channel(v, v->arch.arch_vmx.xen_port);
+}
+
 /*
  * Initialize VMX envirenment for guest. Only the 1st vp/vcpu
  * is registered here.
@@ -286,6 +309,8 @@ vmx_final_setup_guest(struct vcpu *v)
 #ifndef HASH_VHPT     
         init_domain_tlb(v);
 #endif
+       vmx_create_event_channels(v);
+
        /* v->arch.schedule_tail = arch_vmx_do_launch; */
        vmx_create_vp(v);
 
@@ -301,6 +326,15 @@ vmx_final_setup_guest(struct vcpu *v)
        set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
        /* Set up guest 's indicator for VTi domain*/
        set_bit(ARCH_VMX_DOMAIN, &v->arch.arch_vmx.flags);
+}
+
+void
+vmx_relinquish_guest_resources(struct domain *d)
+{
+       struct vcpu *v;
+
+       for_each_vcpu(d, v)
+               vmx_release_assist_channel(v);
 }
 
 void
@@ -411,6 +445,9 @@ void vmx_setup_platform(struct domain *d
        memset(&d->shared_info->evtchn_mask[0], 0xff,
            sizeof(d->shared_info->evtchn_mask));
 
+       /* initiate spinlock for pass virq */
+       spin_lock_init(&d->arch.arch_vmx.virq_assist_lock);
+
        /* Initialize the virtual interrupt lines */
        vmx_virq_line_init(d);
 
@@ -420,13 +457,5 @@ void vmx_setup_platform(struct domain *d
 
 void vmx_do_launch(struct vcpu *v)
 {
-       if (evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0) {
-           printk("VMX domain bind port %d to vcpu %d failed!\n",
-               iopacket_port(v), v->vcpu_id);
-           domain_crash_synchronous();
-       }
-
-       clear_bit(iopacket_port(v), &v->domain->shared_info->evtchn_mask[0]);
-
        vmx_load_all_rr(v);
 }
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Wed Aug 30 22:36:18 2006 +0100
@@ -195,7 +195,8 @@ vmx_load_all_rr(VCPU *vcpu)
                        (void *)vcpu->domain->shared_info,
                        (void *)vcpu->arch.privregs,
                        (void *)vcpu->arch.vhpt.hash, pal_vaddr );
-       ia64_set_pta(vcpu->arch.arch_vmx.mpta);
+       ia64_set_pta(VMX(vcpu, mpta));
+       ia64_set_dcr(VMX(vcpu, mdcr));
 
        ia64_srlz_d();
        ia64_set_psr(psr);
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vmx_process.c   Wed Aug 30 22:36:18 2006 +0100
@@ -186,6 +186,7 @@ void leave_hypervisor_tail(struct pt_reg
 {
     struct domain *d = current->domain;
     struct vcpu *v = current;
+    int callback_irq;
     // FIXME: Will this work properly if doing an RFI???
     if (!is_idle_domain(d) ) { // always comes from guest
 //        struct pt_regs *user_regs = vcpu_regs(current);
@@ -212,6 +213,13 @@ void leave_hypervisor_tail(struct pt_reg
 //           VCPU(v, irr[0]) |= 1UL << 0x10;
 //           v->arch.irq_new_pending = 1;
 //       }
+
+        callback_irq = d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ];
+        if (callback_irq != 0 && local_events_need_delivery()) {
+            /*inject para-device call back irq*/
+            v->vcpu_info->evtchn_upcall_mask = 1;
+            vmx_vcpu_pend_interrupt(v, callback_irq);
+        }
 
         if ( v->arch.irq_new_pending ) {
             v->arch.irq_new_pending = 0;
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/vmx/vmx_support.c
--- a/xen/arch/ia64/vmx/vmx_support.c   Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vmx_support.c   Wed Aug 30 22:36:18 2006 +0100
@@ -1,4 +1,3 @@
-
 /* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
 /*
  * vmx_support.c: vmx specific support interface.
@@ -22,45 +21,11 @@
 #include <xen/config.h>
 #include <xen/sched.h>
 #include <xen/hypercall.h>
+#include <xen/event.h>
 #include <public/sched.h>
 #include <public/hvm/ioreq.h>
 #include <asm/vmx.h>
 #include <asm/vmx_vcpu.h>
-
-/*
- * I/O emulation should be atomic from domain point of view. However,
- * when emulation code is waiting for I/O completion by blocking,
- * other events like DM interrupt, VBD, etc. may come and unblock
- * current exection flow. So we have to prepare for re-block if unblocked
- * by non I/O completion event. After io emulation is done, re-enable
- * pending indicaion if other ports are pending
- */
-void vmx_wait_io(void)
-{
-    struct vcpu *v = current;
-    struct domain *d = v->domain;
-    int port = iopacket_port(v);
-
-    for (;;) {
-        if (test_and_clear_bit(0, &v->vcpu_info->evtchn_upcall_pending) &&
-            test_and_clear_bit(port / BITS_PER_LONG,
-                                     &v->vcpu_info->evtchn_pending_sel) &&
-            test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]))
-            vmx_io_assist(v);
-
-        if (!test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
-            break;
-
-        do_sched_op_compat(SCHEDOP_block, 0);
-    }
-
-    /* re-enable indication if other pending events */
-    if (d->shared_info->evtchn_pending[port / BITS_PER_LONG])
-        set_bit(port / BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
-
-    if (v->vcpu_info->evtchn_pending_sel)
-        set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
-}
 
 /*
  * Only place to call vmx_io_assist is mmio/legacy_io emulation.
@@ -83,17 +48,15 @@ void vmx_io_assist(struct vcpu *v)
 
     p = &vio->vp_ioreq;
 
-    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
-       if (p->state != STATE_IORESP_READY) {
-           /* Can't block here, for the same reason as other places to
-            * use vmx_wait_io. Simple return is safe since vmx_wait_io will
-            * try to block again
-            */
-           return; 
-       } else
-           p->state = STATE_INVALID;
-
-       clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
+    if (p->state == STATE_IORESP_READY) {
+        p->state = STATE_INVALID;
+    }
+    else {
+        /* Can't block here, for the same reason as other places to
+         * use vmx_wait_io. Simple return is safe since vmx_wait_io will
+         * try to block again
+         */
+        return;
     }
 }
 
@@ -108,35 +71,62 @@ void vmx_io_assist(struct vcpu *v)
  */
 void vmx_intr_assist(struct vcpu *v)
 {
-    vcpu_iodata_t *vio;
-    struct domain *d = v->domain;
-    extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
-                                       unsigned long *pend_irr);
-    int port = iopacket_port(v);
-
-    if (test_bit(port, &d->shared_info->evtchn_pending[0]) ||
-       test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
-       vmx_wait_io();
-
-    /* I/O emulation is atomic, so it's impossible to see execution flow
-     * out of vmx_wait_io, when guest is still waiting for response.
-     */
-    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
-       panic_domain(vcpu_regs(v),"!!!Bad resume to guest before I/O emulation 
is done.\n");
-
-    /* Even without event pending, we still need to sync pending bits
-     * between DM and vlsapic. The reason is that interrupt delivery
-     * shares same event channel as I/O emulation, with corresponding
-     * indicator possibly cleared when vmx_wait_io().
-     */
-    vio = get_vio(v->domain, v->vcpu_id);
-    if (!vio)
-       panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n", 
(unsigned long)vio);
-
 #ifdef V_IOSAPIC_READY
     /* Confirm virtual interrupt line signals, and set pending bits in vpd */
-    if(v->vcpu_id==0)
+    if (spin_trylock(&v->domain->arch.arch_vmx.virq_assist_lock)) {
         vmx_virq_line_assist(v);
+        spin_unlock(&v->domain->arch.arch_vmx.virq_assist_lock);
+    }
 #endif
     return;
 }
+
+void vmx_send_assist_req(struct vcpu *v)
+{
+    ioreq_t *p;
+
+    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+    if (unlikely(p->state != STATE_INVALID)) {
+        /* This indicates a bug in the device model.  Crash the
+           domain. */
+        printk("Device model set bad IO state %d.\n", p->state);
+        domain_crash(v->domain);
+        return;
+    }
+    wmb();
+    p->state = STATE_IOREQ_READY;
+    notify_via_xen_event_channel(v->arch.arch_vmx.xen_port);
+
+    /*
+     * Waiting for MMIO completion
+     *   like the wait_on_xen_event_channel() macro like...
+     *   but, we can't call do_softirq() at this point..
+     */
+    for (;;) {
+        if (p->state != STATE_IOREQ_READY &&
+            p->state != STATE_IOREQ_INPROCESS)
+            break;
+
+        set_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags);
+        mb(); /* set blocked status /then/ re-evaluate condition */
+        if (p->state != STATE_IOREQ_READY &&
+            p->state != STATE_IOREQ_INPROCESS)
+        {
+            clear_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags);
+            break;
+        }
+
+        /* I want to call __enter_scheduler() only */
+        do_sched_op_compat(SCHEDOP_yield, 0);
+        mb();
+    }
+
+    /* the code under this line is completer phase... */
+    vmx_io_assist(v);
+}
+
+/* Wake up a vcpu whihc is waiting for interrupts to come in */
+void vmx_prod_vcpu(struct vcpu *v)
+{
+    vcpu_unblock(v);
+}
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/vmx/vtlb.c  Wed Aug 30 22:36:18 2006 +0100
@@ -23,7 +23,7 @@
 
 #include <linux/sched.h>
 #include <asm/tlb.h>
-#include <asm/mm.h>
+#include <xen/mm.h>
 #include <asm/vmx_mm_def.h>
 #include <asm/gcc_intrin.h>
 #include <linux/interrupt.h>
@@ -148,13 +148,17 @@ static void vmx_vhpt_insert(thash_cb_t *
     rr.rrval = ia64_get_rr(ifa);
     head = (thash_data_t *)ia64_thash(ifa);
     tag = ia64_ttag(ifa);
-    if( INVALID_VHPT(head) ) {
-        len = head->len;
-        head->page_flags = pte;
-        head->len = len;
-        head->itir = rr.ps << 2;
-        head->etag = tag;
-        return;
+    cch = head;
+    while (cch) {    
+        if (INVALID_VHPT(cch)) {
+            len = cch->len;
+            cch->page_flags = pte;
+            cch->len = len;
+            cch->itir = rr.ps << 2;
+            cch->etag = tag;
+            return;
+        }
+        cch = cch->next;
     }
 
     if(head->len>=MAX_CCN_DEPTH){
@@ -214,12 +218,22 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
 {
     u64 ret;
     thash_data_t * data;
+    PTA vpta;
+
     data = vhpt_lookup(iha);
     if (data == NULL) {
         data = vtlb_lookup(current, iha, DSIDE_TLB);
         if (data != NULL)
             thash_vhpt_insert(current, data->page_flags, data->itir ,iha);
     }
+
+    /* VHPT long format is not read.  */
+    vmx_vcpu_get_pta(current, &vpta.val);
+    if (vpta.vf == 1) {
+        *pte = 0;
+        return 0;
+    }
+
     asm volatile ("rsm psr.ic|psr.i;;"
                   "srlz.d;;"
                   "ld8.s r9=[%1];;"
@@ -231,11 +245,10 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
                   "ssm psr.ic;;"
                   "srlz.d;;"
                   "ssm psr.i;;"
-             : "=r"(ret) : "r"(iha), "r"(pte):"memory");
+                  : "=r"(ret) : "r"(iha), "r"(pte):"memory");
     return ret;
 }
 
-
 /*
  *  purge software guest tlb
  */
@@ -243,28 +256,29 @@ void vtlb_purge(VCPU *v, u64 va, u64 ps)
 void vtlb_purge(VCPU *v, u64 va, u64 ps)
 {
     thash_data_t *cur;
-    u64 start, end, curadr, size, psbits, tag, def_size;
+    u64 start, curadr, size, psbits, tag, rr_ps, num;
     ia64_rr vrr;
     thash_cb_t *hcb = &v->arch.vtlb;
+
     vcpu_get_rr(v, va, &vrr.rrval);
     psbits = VMX(v, psbits[(va >> 61)]);
-    size = PSIZE(ps);
-    start = va & (-size);
-    end = start + size;
+    start = va & ~((1UL << ps) - 1);
     while (psbits) {
         curadr = start;
-        ps = __ffs(psbits);
-        psbits &= ~(1UL << ps);
-        def_size = PSIZE(ps);
-        vrr.ps = ps;
-        while (curadr < end) {
+        rr_ps = __ffs(psbits);
+        psbits &= ~(1UL << rr_ps);
+        num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
+        size = PSIZE(rr_ps);
+        vrr.ps = rr_ps;
+        while (num) {
             cur = vsa_thash(hcb->pta, curadr, vrr.rrval, &tag);
             while (cur) {
-                if (cur->etag == tag && cur->ps == ps)
+                if (cur->etag == tag && cur->ps == rr_ps)
                     cur->etag = 1UL << 63;
                 cur = cur->next;
             }
-            curadr += def_size;
+            curadr += size;
+            num--;
         }
     }
 }
@@ -277,14 +291,14 @@ static void vhpt_purge(VCPU *v, u64 va, 
 {
     //thash_cb_t *hcb = &v->arch.vhpt;
     thash_data_t *cur;
-    u64 start, end, size, tag;
+    u64 start, size, tag, num;
     ia64_rr rr;
-    size = PSIZE(ps);
-    start = va & (-size);
-    end = start + size;
-    rr.rrval = ia64_get_rr(va);
-    size = PSIZE(rr.ps);    
-    while(start < end){
+    
+    start = va & ~((1UL << ps) - 1);
+    rr.rrval = ia64_get_rr(va);  
+    size = PSIZE(rr.ps);
+    num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
+    while (num) {
         cur = (thash_data_t *)ia64_thash(start);
         tag = ia64_ttag(start);
         while (cur) {
@@ -293,6 +307,7 @@ static void vhpt_purge(VCPU *v, u64 va, 
             cur = cur->next;
         }
         start += size;
+        num--;
     }
     machine_tlb_purge(va, ps);
 }
@@ -347,24 +362,20 @@ void vtlb_insert(VCPU *v, u64 pte, u64 i
     u64 tag, len;
     thash_cb_t *hcb = &v->arch.vtlb;
     vcpu_get_rr(v, va, &vrr.rrval);
-#ifdef VTLB_DEBUG    
-    if (vrr.ps != itir_ps(itir)) {
-//        machine_tlb_insert(hcb->vcpu, entry);
-        panic_domain(NULL, "not preferred ps with va: 0x%lx vrr.ps=%d 
ps=%ld\n",
-             va, vrr.ps, itir_ps(itir));
-        return;
-    }
-#endif
     vrr.ps = itir_ps(itir);
     VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
     hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
-    if( INVALID_TLB(hash_table) ) {
-        len = hash_table->len;
-        hash_table->page_flags = pte;
-        hash_table->len = len;
-        hash_table->itir=itir;
-        hash_table->etag=tag;
-        return;
+    cch = hash_table;
+    while (cch) {
+        if (INVALID_TLB(cch)) {
+            len = cch->len;
+            cch->page_flags = pte;
+            cch->len = len;
+            cch->itir=itir;
+            cch->etag=tag;
+            return;
+        }
+        cch = cch->next;
     }
     if (hash_table->len>=MAX_CCN_DEPTH){
         thash_recycle_cch(hcb, hash_table);
@@ -458,10 +469,6 @@ void thash_purge_and_insert(VCPU *v, u64
     ps = itir_ps(itir);
     vcpu_get_rr(current, ifa, &vrr.rrval);
     mrr.rrval = ia64_get_rr(ifa);
-//    if (vrr.ps != itir_ps(itir)) {
-//        printf("not preferred ps with va: 0x%lx vrr.ps=%d ps=%ld\n",
-//               ifa, vrr.ps, itir_ps(itir));
-//    }
     if(VMX_DOMAIN(v)){
         /* Ensure WB attribute if pte is related to a normal mem page,
          * which is required by vga acceleration since qemu maps shared
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/acpi.c
--- a/xen/arch/ia64/xen/acpi.c  Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/acpi.c  Wed Aug 30 22:36:18 2006 +0100
@@ -51,6 +51,9 @@
 #include <asm/numa.h>
 #include <asm/sal.h>
 #include <asm/hw_irq.h>
+#ifdef XEN
+#include <xen/errno.h>
+#endif
 
 #define BAD_MADT_ENTRY(entry, end) (                                        \
                (!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/dom0_ops.c      Wed Aug 30 22:36:18 2006 +0100
@@ -21,6 +21,7 @@
 #include <asm/vmx.h>
 #include <asm/dom_fw.h>
 #include <xen/iocap.h>
+#include <xen/errno.h>
 
 void build_physmap_table(struct domain *d);
 
@@ -39,8 +40,8 @@ long arch_do_domctl(xen_domctl_t *op, XE
     {
         unsigned long i;
         struct domain *d = find_domain_by_id(op->domain);
-        unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
-        unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
+        unsigned long start_page = op->u.getmemlist.start_pfn;
+        unsigned long nr_pages = op->u.getmemlist.max_pfns;
         unsigned long mfn;
 
         if ( d == NULL ) {
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/dom_fw.c
--- a/xen/arch/ia64/xen/dom_fw.c        Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/dom_fw.c        Wed Aug 30 22:36:18 2006 +0100
@@ -21,28 +21,23 @@
 #include <asm/fpswa.h>
 #include <xen/version.h>
 #include <xen/acpi.h>
+#include <xen/errno.h>
 
 #include <asm/dom_fw.h>
 #include <asm/bundle.h>
 
-static void dom_fw_init (struct domain *d, struct ia64_boot_param *bp, char 
*fw_mem, int fw_mem_size, unsigned long maxmem);
-
-extern struct domain *dom0;
+#define ONE_MB (1UL << 20)
 
 extern unsigned long running_on_sim;
 
-/* Note: two domains cannot be created simulteanously!  */
-static unsigned long dom_fw_base_mpa = -1;
-static unsigned long imva_fw_base = -1;
-
 #define FW_VENDOR 
"X\0e\0n\0/\0i\0a\0\066\0\064\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
 
-#define MAKE_MD(typ, attr, start, end, abs)                            \
+#define MAKE_MD(typ, attr, start, end)                                         
\
        do {                                                            \
-               md = efi_memmap + i++;                                  \
+               md = tables->efi_memmap + i++;                          \
                md->type = typ;                                         \
                md->pad = 0;                                            \
-               md->phys_addr = abs ? start : start_mpaddr + start;     \
+               md->phys_addr = start;                                  \
                md->virt_addr = 0;                                      \
                md->num_pages = (end - start) >> EFI_PAGE_SHIFT;        \
                md->attribute = attr;                                   \
@@ -51,44 +46,31 @@ static unsigned long imva_fw_base = -1;
 #define EFI_HYPERCALL_PATCH(tgt, call)                                 \
        do {                                                            \
                dom_efi_hypercall_patch(d, FW_HYPERCALL_##call##_PADDR, \
-                                       FW_HYPERCALL_##call);           \
-               tgt = dom_pa((unsigned long) pfn);                      \
-               *pfn++ = FW_HYPERCALL_##call##_PADDR + start_mpaddr;    \
-               *pfn++ = 0;                                             \
+                                FW_HYPERCALL_##call, hypercalls_imva); \
+               /* Descriptor address.  */                              \
+               tables->efi_runtime.tgt =                               \
+                                   FW_FIELD_MPA(func_ptrs) + 8 * pfn;  \
+               /* Descriptor.  */                                      \
+               tables->func_ptrs[pfn++] = FW_HYPERCALL_##call##_PADDR; \
+               tables->func_ptrs[pfn++] = 0;                           \
        } while (0)
-
-// return domain (meta)physical address for a given imva
-// this function is a call-back from dom_fw_init
-static unsigned long
-dom_pa(unsigned long imva)
-{
-       if (dom_fw_base_mpa == -1 || imva_fw_base == -1) {
-               printf("dom_pa: uninitialized! (spinning...)\n");
-               while(1);
-       }
-       if (imva - imva_fw_base > PAGE_SIZE) {
-               printf("dom_pa: bad offset! imva=0x%lx, imva_fw_base=0x%lx 
(spinning...)\n",
-                       imva, imva_fw_base);
-               while(1);
-       }
-       return dom_fw_base_mpa + (imva - imva_fw_base);
-}
 
 // allocate a page for fw
 // build_physmap_table() which is called by new_thread()
 // does for domU.
-#define ASSIGN_NEW_DOMAIN_PAGE_IF_DOM0(d, mpaddr)   \
-    do {                                            \
-        if ((d) == dom0) {                          \
-            assign_new_domain0_page((d), (mpaddr)); \
-        }                                           \
-    } while (0)
+static inline void
+assign_new_domain_page_if_dom0(struct domain *d, unsigned long mpaddr)
+{
+        if (d == dom0)
+            assign_new_domain0_page(d, mpaddr);
+}
 
 /**************************************************************************
 Hypercall bundle creation
 **************************************************************************/
 
-static void build_hypercall_bundle(UINT64 *imva, UINT64 brkimm, UINT64 hypnum, 
UINT64 ret)
+static void
+build_hypercall_bundle(u64 *imva, u64 brkimm, u64 hypnum, u64 ret)
 {
        INST64_A5 slot0;
        INST64_I19 slot1;
@@ -104,8 +86,8 @@ static void build_hypercall_bundle(UINT6
        slot1.inst = 0;
        slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
        slot1.imm20 = brkimm; slot1.i = brkimm >> 20;
-       // if ret slot2: br.ret.sptk.many rp
-       // else slot2: br.cond.sptk.many rp
+       // if ret slot2:  br.ret.sptk.many rp
+       // else   slot2:  br.cond.sptk.many rp
        slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
        slot2.wh = 0; slot2.d = 0; slot2.major = 0x0;
        if (ret) {
@@ -125,7 +107,8 @@ static void build_hypercall_bundle(UINT6
        ia64_fc(imva + 1);
 }
 
-static void build_pal_hypercall_bundles(UINT64 *imva, UINT64 brkimm, UINT64 
hypnum)
+static void
+build_pal_hypercall_bundles(u64 *imva, u64 brkimm, u64 hypnum)
 {
        extern unsigned long pal_call_stub[];
        IA64_BUNDLE bundle;
@@ -162,76 +145,68 @@ static void build_pal_hypercall_bundles(
 }
 
 // builds a hypercall bundle at domain physical address
-static void dom_fpswa_hypercall_patch(struct domain *d)
+static void
+dom_fpswa_hypercall_patch(struct domain *d, unsigned long imva)
 {
        unsigned long *entry_imva, *patch_imva;
-       unsigned long entry_paddr = FW_HYPERCALL_FPSWA_ENTRY_PADDR;
-       unsigned long patch_paddr = FW_HYPERCALL_FPSWA_PATCH_PADDR;
-
-       ASSIGN_NEW_DOMAIN_PAGE_IF_DOM0(d, entry_paddr);
-       ASSIGN_NEW_DOMAIN_PAGE_IF_DOM0(d, patch_paddr);
-       entry_imva = domain_mpa_to_imva(d, entry_paddr);
-       patch_imva = domain_mpa_to_imva(d, patch_paddr);
-
+       const unsigned long entry_paddr = FW_HYPERCALL_FPSWA_ENTRY_PADDR;
+       const unsigned long patch_paddr = FW_HYPERCALL_FPSWA_PATCH_PADDR;
+
+       entry_imva = (unsigned long *)(imva + entry_paddr -
+                                      FW_HYPERCALL_BASE_PADDR);
+       patch_imva = (unsigned long *)(imva + patch_paddr -
+                                      FW_HYPERCALL_BASE_PADDR);
+
+       /* Descriptor.  */
        *entry_imva++ = patch_paddr;
        *entry_imva   = 0;
-       build_hypercall_bundle(patch_imva, d->arch.breakimm, 
FW_HYPERCALL_FPSWA, 1);
+
+       build_hypercall_bundle(patch_imva, d->arch.breakimm,
+                              FW_HYPERCALL_FPSWA, 1);
 }
 
 // builds a hypercall bundle at domain physical address
-static void dom_efi_hypercall_patch(struct domain *d, unsigned long paddr, 
unsigned long hypercall)
-{
-       unsigned long *imva;
-
-       ASSIGN_NEW_DOMAIN_PAGE_IF_DOM0(d, paddr);
-       imva = domain_mpa_to_imva(d, paddr);
-       build_hypercall_bundle(imva, d->arch.breakimm, hypercall, 1);
+static void
+dom_efi_hypercall_patch(struct domain *d, unsigned long paddr,
+                        unsigned long hypercall, unsigned long imva)
+{
+       build_hypercall_bundle((u64 *)(imva + paddr - FW_HYPERCALL_BASE_PADDR),
+                              d->arch.breakimm, hypercall, 1);
 }
 
 // builds a hypercall bundle at domain physical address
-static void dom_fw_hypercall_patch(struct domain *d, unsigned long paddr, 
unsigned long hypercall,unsigned long ret)
-{
-       unsigned long *imva;
-
-       ASSIGN_NEW_DOMAIN_PAGE_IF_DOM0(d, paddr);
-       imva = domain_mpa_to_imva(d, paddr);
-       build_hypercall_bundle(imva, d->arch.breakimm, hypercall, ret);
-}
-
-static void dom_fw_pal_hypercall_patch(struct domain *d, unsigned long paddr)
-{
-       unsigned long *imva;
-
-       ASSIGN_NEW_DOMAIN_PAGE_IF_DOM0(d, paddr);
-       imva = domain_mpa_to_imva(d, paddr);
-       build_pal_hypercall_bundles(imva, d->arch.breakimm, 
FW_HYPERCALL_PAL_CALL);
-}
-
-
-void dom_fw_setup(struct domain *d, unsigned long bp_mpa, unsigned long maxmem)
-{
-       struct ia64_boot_param *bp;
-
-       dom_fw_base_mpa = 0;
-       ASSIGN_NEW_DOMAIN_PAGE_IF_DOM0(d, dom_fw_base_mpa);
-       imva_fw_base = (unsigned long) domain_mpa_to_imva(d, dom_fw_base_mpa);
-       ASSIGN_NEW_DOMAIN_PAGE_IF_DOM0(d, bp_mpa);
-       bp = domain_mpa_to_imva(d, bp_mpa);
-       dom_fw_init(d, bp, (char *) imva_fw_base, PAGE_SIZE, maxmem);
-}
-
-
-/* the following heavily leveraged from linux/arch/ia64/hp/sim/fw-emu.c */
-
-#define NFUNCPTRS 20
+static void
+dom_fw_hypercall_patch(struct domain *d, unsigned long paddr,
+                       unsigned long hypercall,unsigned long ret,
+                       unsigned long imva)
+{
+       build_hypercall_bundle((u64 *)(imva + paddr - FW_HYPERCALL_BASE_PADDR),
+                              d->arch.breakimm, hypercall, ret);
+}
+
+static void
+dom_fw_pal_hypercall_patch(struct domain *d, unsigned long paddr,
+                           unsigned long imva)
+{
+       build_pal_hypercall_bundles((u64*)(imva + paddr -
+                                   FW_HYPERCALL_BASE_PADDR),
+                                   d->arch.breakimm, FW_HYPERCALL_PAL_CALL);
+}
 
 static inline void
 print_md(efi_memory_desc_t *md)
 {
-       printk("domain mem: type=%2u, attr=0x%016lx, range=[0x%016lx-0x%016lx) 
(%luMB)\n",
-               md->type, md->attribute, md->phys_addr,
-               md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
-               md->num_pages >> (20 - EFI_PAGE_SHIFT));
+       u64 size;
+       
+       printk("dom mem: type=%2u, attr=0x%016lx, range=[0x%016lx-0x%016lx) ",
+              md->type, md->attribute, md->phys_addr,
+              md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT));
+
+       size = md->num_pages << EFI_PAGE_SHIFT;
+       if (size > ONE_MB)
+               printf ("(%luMB)\n", size >> 20);
+       else
+               printf ("(%luKB)\n", size >> 10);
 }
 
 static u32 lsapic_nbr;
@@ -316,6 +291,8 @@ struct fake_acpi_tables {
        u8 pm1a_cnt_blk[1];
        u8 pm_tmr_blk[4];
 };
+#define ACPI_TABLE_MPA(field) \
+  FW_ACPI_BASE_PADDR + offsetof(struct fake_acpi_tables, field);
 
 /* Create enough of an ACPI structure to make the guest OS ACPI happy. */
 static void
@@ -345,8 +322,8 @@ dom_fw_fake_acpi(struct domain *d, struc
        xsdt->asl_compiler_revision = (xen_major_version() << 16) |
                xen_minor_version();
 
-       xsdt->table_offset_entry[0] = dom_pa((unsigned long) fadt);
-       tables->madt_ptr = dom_pa((unsigned long) madt);
+       xsdt->table_offset_entry[0] = ACPI_TABLE_MPA(fadt);
+       tables->madt_ptr = ACPI_TABLE_MPA(madt);
 
        xsdt->checksum = generate_acpi_checksum(xsdt, xsdt->length);
 
@@ -364,8 +341,8 @@ dom_fw_fake_acpi(struct domain *d, struc
        facs->version = 1;
        facs->length = sizeof(struct facs_descriptor_rev2);
 
-       fadt->xfirmware_ctrl = dom_pa((unsigned long) facs);
-       fadt->Xdsdt = dom_pa((unsigned long) dsdt);
+       fadt->xfirmware_ctrl = ACPI_TABLE_MPA(facs);
+       fadt->Xdsdt = ACPI_TABLE_MPA(dsdt);
 
        /*
         * All of the below FADT entries are filled it to prevent warnings
@@ -375,15 +352,15 @@ dom_fw_fake_acpi(struct domain *d, struc
        fadt->pm1_evt_len = 4;
        fadt->xpm1a_evt_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
        fadt->xpm1a_evt_blk.register_bit_width = 8;
-       fadt->xpm1a_evt_blk.address = dom_pa((unsigned long) 
&tables->pm1a_evt_blk);
+       fadt->xpm1a_evt_blk.address = ACPI_TABLE_MPA(pm1a_evt_blk);
        fadt->pm1_cnt_len = 1;
        fadt->xpm1a_cnt_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
        fadt->xpm1a_cnt_blk.register_bit_width = 8;
-       fadt->xpm1a_cnt_blk.address = dom_pa((unsigned long) 
&tables->pm1a_cnt_blk);
+       fadt->xpm1a_cnt_blk.address = ACPI_TABLE_MPA(pm1a_cnt_blk);
        fadt->pm_tm_len = 4;
        fadt->xpm_tmr_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
        fadt->xpm_tmr_blk.register_bit_width = 8;
-       fadt->xpm_tmr_blk.address = dom_pa((unsigned long) &tables->pm_tmr_blk);
+       fadt->xpm_tmr_blk.address = ACPI_TABLE_MPA(pm_tmr_blk);
 
        fadt->checksum = generate_acpi_checksum(fadt, fadt->length);
 
@@ -392,7 +369,7 @@ dom_fw_fake_acpi(struct domain *d, struc
        strcpy(rsdp->oem_id, "XEN");
        rsdp->revision = 2; /* ACPI 2.0 includes XSDT */
        rsdp->length = sizeof(struct acpi20_table_rsdp);
-       rsdp->xsdt_address = dom_pa((unsigned long) xsdt);
+       rsdp->xsdt_address = ACPI_TABLE_MPA(xsdt);
 
        rsdp->checksum = generate_acpi_checksum(rsdp,
                                                ACPI_RSDP_CHECKSUM_LENGTH);
@@ -467,115 +444,6 @@ dom_fw_fake_acpi(struct domain *d, struc
        return;
 }
 
-#define NUM_EFI_SYS_TABLES 6
-#define NUM_MEM_DESCS  64 //large enough
-
-struct dom0_passthrough_arg {
-    struct domain*      d;
-    int                 flags;
-    efi_memory_desc_t *md;
-    int*                i;
-};
-
-static int
-dom_fw_dom0_passthrough(efi_memory_desc_t *md, void *arg__)
-{
-    struct dom0_passthrough_arg* arg = (struct dom0_passthrough_arg*)arg__;
-    unsigned long paddr;
-    struct domain* d = arg->d;
-    u64 start = md->phys_addr;
-    u64 size = md->num_pages << EFI_PAGE_SHIFT;
-
-    if (md->type == EFI_MEMORY_MAPPED_IO ||
-        md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
-
-        //XXX some machine has large mmio area whose size is about several TB.
-        //    It requires impractical memory to map such a huge region
-        //    to a domain.
-        //    For now we don't map it, but later we must fix this.
-        if (md->type == EFI_MEMORY_MAPPED_IO && (size > 0x100000000UL))
-            return 0;
-
-        paddr = assign_domain_mmio_page(d, start, size);
-    } else
-        paddr = assign_domain_mach_page(d, start, size, arg->flags);
-
-    BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
-           md->type != EFI_RUNTIME_SERVICES_DATA &&
-           md->type != EFI_ACPI_RECLAIM_MEMORY &&
-           md->type != EFI_ACPI_MEMORY_NVS &&
-           md->type != EFI_RESERVED_TYPE &&
-           md->type != EFI_MEMORY_MAPPED_IO &&
-           md->type != EFI_MEMORY_MAPPED_IO_PORT_SPACE);
-
-    arg->md->type = md->type;
-    arg->md->pad = 0;
-    arg->md->phys_addr = paddr;
-    arg->md->virt_addr = 0;
-    arg->md->num_pages = md->num_pages;
-    arg->md->attribute = md->attribute;
-
-    (*arg->i)++;
-    arg->md++;
-    return 0;
-}
-
-/*
- * Create dom0 MDT entries for conventional memory below 1MB.  Without
- * this Linux will assume VGA is present because 0xA0000 will always
- * be either a hole in the MDT or an I/O region via the passthrough.
- */
-static int
-dom_fw_dom0_lowmem(efi_memory_desc_t *md, void *arg__)
-{
-    struct dom0_passthrough_arg* arg = (struct dom0_passthrough_arg*)arg__;
-    u64 end = min(HYPERCALL_START,
-                  md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT));
-
-    BUG_ON(md->type != EFI_CONVENTIONAL_MEMORY);
-
-    /* avoid hypercall area */
-    if (md->phys_addr >= HYPERCALL_START)
-        return 0;
-
-    /* avoid firmware base area */
-    if (md->phys_addr < dom_pa(imva_fw_base))
-        end = min(end, dom_pa(imva_fw_base));
-    else if (md->phys_addr < dom_pa(imva_fw_base + PAGE_SIZE)) {
-        if (end < dom_pa(imva_fw_base + PAGE_SIZE))
-            return 0;
-        md->phys_addr = dom_pa(imva_fw_base + PAGE_SIZE);
-    }
-
-    arg->md->type = md->type;
-    arg->md->pad = 0;
-    arg->md->phys_addr = md->phys_addr;
-    arg->md->virt_addr = 0;
-    arg->md->num_pages = (end - md->phys_addr) >> EFI_PAGE_SHIFT;
-    arg->md->attribute = md->attribute;
-
-    (*arg->i)++;
-    arg->md++;
-
-    /* if firmware area spliced the md, add the upper part here */
-    if (end == dom_pa(imva_fw_base)) {
-        end = min(HYPERCALL_START,
-                  md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT));
-       if (end > dom_pa(imva_fw_base + PAGE_SIZE)) {
-            arg->md->type = md->type;
-            arg->md->pad = 0;
-            arg->md->phys_addr = dom_pa(imva_fw_base + PAGE_SIZE);
-            arg->md->virt_addr = 0;
-            arg->md->num_pages = (end - arg->md->phys_addr) >> EFI_PAGE_SHIFT;
-            arg->md->attribute = md->attribute;
-
-            (*arg->i)++;
-            arg->md++;
-        }
-    }
-    return 0;
-}
-
 static int
 efi_mdt_cmp(const void *a, const void *b)
 {
@@ -595,279 +463,403 @@ efi_mdt_cmp(const void *a, const void *b
        return 0;
 }
 
+#define NFUNCPTRS 16
+#define NUM_EFI_SYS_TABLES 6
+#define NUM_MEM_DESCS 64 //large enough
+
+struct fw_tables {
+       efi_system_table_t efi_systab;
+       efi_runtime_services_t efi_runtime;
+       efi_config_table_t efi_tables[NUM_EFI_SYS_TABLES];
+
+       struct ia64_sal_systab sal_systab;
+       struct ia64_sal_desc_entry_point sal_ed;
+       struct ia64_sal_desc_ap_wakeup sal_wakeup;
+       /* End of SAL descriptors.  Do not forget to update checkum bound.  */
+
+       fpswa_interface_t fpswa_inf;
+       efi_memory_desc_t efi_memmap[NUM_MEM_DESCS];
+       unsigned long func_ptrs[2*NFUNCPTRS];
+       struct xen_sal_data sal_data;
+       unsigned char fw_vendor[sizeof(FW_VENDOR)];
+};
+#define FW_FIELD_MPA(field) \
+   FW_TABLES_BASE_PADDR + offsetof(struct fw_tables, field)
+
+/* Complete the dom0 memmap.  */
+static int
+complete_dom0_memmap(struct domain *d,
+                     struct fw_tables *tables,
+                     unsigned long maxmem,
+                     int num_mds)
+{
+       efi_memory_desc_t *md;
+       u64 addr;
+       int j;
+       void *efi_map_start, *efi_map_end, *p;
+       u64 efi_desc_size;
+       int i;
+
+       /* Walk through all MDT entries.
+          Copy all interesting entries.  */
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               const efi_memory_desc_t *md = p;
+               efi_memory_desc_t *dom_md = &tables->efi_memmap[num_mds];
+               u64 start = md->phys_addr;
+               u64 size = md->num_pages << EFI_PAGE_SHIFT;
+               u64 end = start + size;
+
+               switch (md->type) {
+               case EFI_RUNTIME_SERVICES_CODE:
+               case EFI_RUNTIME_SERVICES_DATA:
+               case EFI_ACPI_RECLAIM_MEMORY:
+               case EFI_ACPI_MEMORY_NVS:
+               case EFI_RESERVED_TYPE:
+                       /* Map into dom0 - All these are writable.  */
+                       assign_domain_mach_page(d, start, size,
+                                               ASSIGN_writable);
+                       /* Fall-through.  */
+               case EFI_MEMORY_MAPPED_IO:
+                       /* Will be mapped with ioremap.  */
+                       /* Copy descriptor.  */
+                       *dom_md = *md;
+                       dom_md->virt_addr = 0;
+                       num_mds++;
+                       break;
+
+               case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+                       /* Map into dom0.  */
+                       assign_domain_mmio_page(d, start, size);
+                       /* Copy descriptor.  */
+                       *dom_md = *md;
+                       dom_md->virt_addr = 0;
+                       num_mds++;
+                       break;
+
+               case EFI_CONVENTIONAL_MEMORY:
+               case EFI_LOADER_CODE:
+               case EFI_LOADER_DATA:
+               case EFI_BOOT_SERVICES_CODE:
+               case EFI_BOOT_SERVICES_DATA:
+                       /* Create dom0 MDT entries for conventional memory
+                          below 1MB.  Without this Linux will assume VGA is
+                          present because 0xA0000 will always be either a hole
+                          in the MDT or an I/O region via the passthrough.  */
+
+                       end = min(ONE_MB, end);
+
+                       /* Avoid firmware and hypercall area.
+                          We know they are 0-based.  */
+                       if (end < FW_END_PADDR || start >= ONE_MB)
+                               break;
+                       if (start < FW_END_PADDR)
+                               start = FW_END_PADDR;
+                       
+                       dom_md->type = EFI_CONVENTIONAL_MEMORY;
+                       dom_md->phys_addr = start;
+                       dom_md->virt_addr = 0;
+                       dom_md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
+                       dom_md->attribute = md->attribute;
+                       num_mds++;
+                       break;
+
+               case EFI_UNUSABLE_MEMORY:
+               case EFI_PAL_CODE:
+                       /* Discard.  */
+                       break;
+
+               default:
+                       /* Print a warning but continue.  */
+                       printf("complete_dom0_memmap: warning: "
+                              "unhandled MDT entry type %u\n", md->type);
+               }
+       }
+       BUG_ON(num_mds > NUM_MEM_DESCS);
+       
+       sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
+            efi_mdt_cmp, NULL);
+
+       /* find gaps and fill them with conventional memory */
+       i = num_mds;
+       for (j = 0; j < num_mds; j++) {
+               unsigned long end;
+               unsigned long next_start;
+               
+               md = &tables->efi_memmap[j];
+               end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+               
+               if (j + 1 < num_mds) {
+                       efi_memory_desc_t* next_md;
+                       next_md = &tables->efi_memmap[j + 1];
+                       next_start = next_md->phys_addr;
+                       
+                       /* Have just been sorted.  */
+                       BUG_ON(end > next_start);
+                       
+                       /* No room for memory!  */
+                       if (end == next_start)
+                               continue;
+                       
+                       if (next_start > maxmem)
+                               next_start = maxmem;
+               }
+               else
+                       next_start = maxmem;
+               
+               /* Avoid "legacy" low memory addresses 
+                  and the HYPERCALL area.  */
+               if (end < ONE_MB)
+                       end = ONE_MB;
+                                                     
+               // clip the range and align to PAGE_SIZE
+               next_start = next_start & PAGE_MASK;
+               end = PAGE_ALIGN(end);
+               
+               /* No room for memory.  */
+               if (end >= next_start)
+                       continue;
+               
+               MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
+                       end, next_start);
+
+               if (next_start >= maxmem)
+                       break;
+       }
+       num_mds = i;
+       BUG_ON(num_mds > NUM_MEM_DESCS);
+       sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
+            efi_mdt_cmp, NULL);
+
+       // dom0 doesn't need build_physmap_table()
+       // see arch_set_info_guest()
+       // instead we allocate pages manually.
+       for (i = 0; i < num_mds; i++) {
+               md = &tables->efi_memmap[i];
+               if (md->phys_addr > maxmem)
+                       break;
+               
+               if (md->type == EFI_LOADER_DATA ||
+                   md->type == EFI_PAL_CODE ||
+                   md->type == EFI_CONVENTIONAL_MEMORY) {
+                       unsigned long start = md->phys_addr & PAGE_MASK;
+                       unsigned long end = md->phys_addr +
+                               (md->num_pages << EFI_PAGE_SHIFT);
+
+                       if (end == start) {
+                               /* md->num_pages = 0 is allowed. */
+                               continue;
+                       }
+                       if (end > (max_page << PAGE_SHIFT))
+                               end = (max_page << PAGE_SHIFT);
+                       
+                       for (addr = start; addr < end; addr += PAGE_SIZE)
+                               assign_new_domain0_page(d, addr);
+               }
+       }
+       // Map low-memory holes & unmapped MMIO for legacy drivers
+       for (addr = 0; addr < ONE_MB; addr += PAGE_SIZE) {
+               if (domain_page_mapped(d, addr))
+                       continue;
+               
+               if (efi_mmio(addr, PAGE_SIZE))
+                       assign_domain_mmio_page(d, addr, PAGE_SIZE);
+       }
+       return num_mds;
+}
+       
 static void
-dom_fw_init (struct domain *d, struct ia64_boot_param *bp, char *fw_mem, int 
fw_mem_size, unsigned long maxmem)
-{
-       efi_system_table_t *efi_systab;
-       efi_runtime_services_t *efi_runtime;
-       efi_config_table_t *efi_tables;
-       struct ia64_sal_systab *sal_systab;
-       struct ia64_sal_desc_entry_point *sal_ed;
-       struct ia64_sal_desc_ap_wakeup *sal_wakeup;
-       fpswa_interface_t *fpswa_inf;
-       efi_memory_desc_t *efi_memmap, *md;
-       struct xen_sal_data *sal_data;
-       unsigned long *pfn;
-       unsigned char checksum = 0;
-       char *cp, *fw_vendor;
-       int num_mds, j, i = 0;
-       const unsigned long start_mpaddr = 0;
-
-/* FIXME: should check size but for now we have a whole MB to play with.
-   And if stealing code from fw-emu.c, watch out for new fw_vendor on the end!
-       if (fw_mem_size < sizeof(fw_mem_proto)) {
-               printf("sys_fw_init: insufficient space for fw_mem\n");
-               return 0;
-       }
-*/
-       memset(fw_mem, 0, fw_mem_size);
-
-       cp = fw_mem;
-       efi_systab  = (void *) cp; cp += sizeof(*efi_systab);
-       efi_runtime = (void *) cp; cp += sizeof(*efi_runtime);
-       efi_tables  = (void *) cp; cp += NUM_EFI_SYS_TABLES * 
sizeof(*efi_tables);
-       sal_systab  = (void *) cp; cp += sizeof(*sal_systab);
-       sal_ed      = (void *) cp; cp += sizeof(*sal_ed);
-       sal_wakeup  = (void *) cp; cp += sizeof(*sal_wakeup);
-       fpswa_inf   = (void *) cp; cp += sizeof(*fpswa_inf);
-       efi_memmap  = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap);
-       pfn         = (void *) cp; cp += NFUNCPTRS * 2 * sizeof(pfn);
-       sal_data    = (void *) cp; cp += sizeof(*sal_data);
+dom_fw_init(struct domain *d,
+            struct ia64_boot_param *bp,
+            struct fw_tables *tables,
+            unsigned long hypercalls_imva,
+            unsigned long maxmem)
+{
+       efi_memory_desc_t *md;
+       unsigned long pfn;
+       unsigned char checksum;
+       char *cp;
+       int num_mds, i;
+
+       memset(tables, 0, sizeof(struct fw_tables));
 
        /* Initialise for EFI_SET_VIRTUAL_ADDRESS_MAP emulation */
-       d->arch.efi_runtime = efi_runtime;
-       d->arch.fpswa_inf   = fpswa_inf;
-       d->arch.sal_data    = sal_data;
-
-       memset(efi_systab, 0, sizeof(efi_systab));
-       efi_systab->hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE;
-       efi_systab->hdr.revision  = EFI_SYSTEM_TABLE_REVISION;
-       efi_systab->hdr.headersize = sizeof(efi_systab->hdr);
-       fw_vendor = cp;
-       cp += sizeof(FW_VENDOR) + (8-((unsigned long)cp & 7)); // round to 
64-bit boundary
-
-       memcpy(fw_vendor,FW_VENDOR,sizeof(FW_VENDOR));
-       efi_systab->fw_vendor = dom_pa((unsigned long) fw_vendor);
-       efi_systab->fw_revision = 1;
-       efi_systab->runtime = (void *) dom_pa((unsigned long) efi_runtime);
-       efi_systab->nr_tables = NUM_EFI_SYS_TABLES;
-       efi_systab->tables = dom_pa((unsigned long) efi_tables);
-
-       efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
-       efi_runtime->hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
-       efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr);
-
-       EFI_HYPERCALL_PATCH(efi_runtime->get_time,EFI_GET_TIME);
-       EFI_HYPERCALL_PATCH(efi_runtime->set_time,EFI_SET_TIME);
-       EFI_HYPERCALL_PATCH(efi_runtime->get_wakeup_time,EFI_GET_WAKEUP_TIME);
-       EFI_HYPERCALL_PATCH(efi_runtime->set_wakeup_time,EFI_SET_WAKEUP_TIME);
-       
EFI_HYPERCALL_PATCH(efi_runtime->set_virtual_address_map,EFI_SET_VIRTUAL_ADDRESS_MAP);
-       EFI_HYPERCALL_PATCH(efi_runtime->get_variable,EFI_GET_VARIABLE);
-       
EFI_HYPERCALL_PATCH(efi_runtime->get_next_variable,EFI_GET_NEXT_VARIABLE);
-       EFI_HYPERCALL_PATCH(efi_runtime->set_variable,EFI_SET_VARIABLE);
-       
EFI_HYPERCALL_PATCH(efi_runtime->get_next_high_mono_count,EFI_GET_NEXT_HIGH_MONO_COUNT);
-       EFI_HYPERCALL_PATCH(efi_runtime->reset_system,EFI_RESET_SYSTEM);
-
-       efi_tables[0].guid = SAL_SYSTEM_TABLE_GUID;
-       efi_tables[0].table = dom_pa((unsigned long) sal_systab);
+       d->arch.efi_runtime = &tables->efi_runtime;
+       d->arch.fpswa_inf   = &tables->fpswa_inf;
+       d->arch.sal_data    = &tables->sal_data;
+
+       /* EFI systab.  */
+       tables->efi_systab.hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE;
+       tables->efi_systab.hdr.revision  = EFI_SYSTEM_TABLE_REVISION;
+       tables->efi_systab.hdr.headersize = sizeof(tables->efi_systab.hdr);
+
+       memcpy(tables->fw_vendor,FW_VENDOR,sizeof(FW_VENDOR));
+       tables->efi_systab.fw_vendor = FW_FIELD_MPA(fw_vendor);
+       tables->efi_systab.fw_revision = 1;
+       tables->efi_systab.runtime = (void *)FW_FIELD_MPA(efi_runtime);
+       tables->efi_systab.nr_tables = NUM_EFI_SYS_TABLES;
+       tables->efi_systab.tables = FW_FIELD_MPA(efi_tables);
+
+       /* EFI runtime.  */
+       tables->efi_runtime.hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
+       tables->efi_runtime.hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
+       tables->efi_runtime.hdr.headersize = sizeof(tables->efi_runtime.hdr);
+
+       pfn = 0;
+       EFI_HYPERCALL_PATCH(get_time,EFI_GET_TIME);
+       EFI_HYPERCALL_PATCH(set_time,EFI_SET_TIME);
+       EFI_HYPERCALL_PATCH(get_wakeup_time,EFI_GET_WAKEUP_TIME);
+       EFI_HYPERCALL_PATCH(set_wakeup_time,EFI_SET_WAKEUP_TIME);
+       EFI_HYPERCALL_PATCH(set_virtual_address_map,
+                           EFI_SET_VIRTUAL_ADDRESS_MAP);
+       EFI_HYPERCALL_PATCH(get_variable,EFI_GET_VARIABLE);
+       EFI_HYPERCALL_PATCH(get_next_variable,EFI_GET_NEXT_VARIABLE);
+       EFI_HYPERCALL_PATCH(set_variable,EFI_SET_VARIABLE);
+       EFI_HYPERCALL_PATCH(get_next_high_mono_count,
+                           EFI_GET_NEXT_HIGH_MONO_COUNT);
+       EFI_HYPERCALL_PATCH(reset_system,EFI_RESET_SYSTEM);
+
+       /* System tables.  */
+       tables->efi_tables[0].guid = SAL_SYSTEM_TABLE_GUID;
+       tables->efi_tables[0].table = FW_FIELD_MPA(sal_systab);
        for (i = 1; i < NUM_EFI_SYS_TABLES; i++) {
-               efi_tables[i].guid = NULL_GUID;
-               efi_tables[i].table = 0;
-       }
+               tables->efi_tables[i].guid = NULL_GUID;
+               tables->efi_tables[i].table = 0;
+       }
+       i = 1;
        if (d == dom0) {
+               /* Write messages to the console.  */
+               touch_acpi_table();
+
                printf("Domain0 EFI passthrough:");
-               i = 1;
                if (efi.mps) {
-                       efi_tables[i].guid = MPS_TABLE_GUID;
-                       efi_tables[i].table = __pa(efi.mps);
-                       printf(" MPS=0x%lx",efi_tables[i].table);
+                       tables->efi_tables[i].guid = MPS_TABLE_GUID;
+                       tables->efi_tables[i].table = __pa(efi.mps);
+                       printf(" MPS=0x%lx",tables->efi_tables[i].table);
                        i++;
                }
 
-               touch_acpi_table();
-
                if (efi.acpi20) {
-                       efi_tables[i].guid = ACPI_20_TABLE_GUID;
-                       efi_tables[i].table = __pa(efi.acpi20);
-                       printf(" ACPI 2.0=0x%lx",efi_tables[i].table);
+                       tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
+                       tables->efi_tables[i].table = __pa(efi.acpi20);
+                       printf(" ACPI 2.0=0x%lx",tables->efi_tables[i].table);
                        i++;
                }
                if (efi.acpi) {
-                       efi_tables[i].guid = ACPI_TABLE_GUID;
-                       efi_tables[i].table = __pa(efi.acpi);
-                       printf(" ACPI=0x%lx",efi_tables[i].table);
+                       tables->efi_tables[i].guid = ACPI_TABLE_GUID;
+                       tables->efi_tables[i].table = __pa(efi.acpi);
+                       printf(" ACPI=0x%lx",tables->efi_tables[i].table);
                        i++;
                }
                if (efi.smbios) {
-                       efi_tables[i].guid = SMBIOS_TABLE_GUID;
-                       efi_tables[i].table = __pa(efi.smbios);
-                       printf(" SMBIOS=0x%lx",efi_tables[i].table);
+                       tables->efi_tables[i].guid = SMBIOS_TABLE_GUID;
+                       tables->efi_tables[i].table = __pa(efi.smbios);
+                       printf(" SMBIOS=0x%lx",tables->efi_tables[i].table);
                        i++;
                }
                if (efi.hcdp) {
-                       efi_tables[i].guid = HCDP_TABLE_GUID;
-                       efi_tables[i].table = __pa(efi.hcdp);
-                       printf(" HCDP=0x%lx",efi_tables[i].table);
+                       tables->efi_tables[i].guid = HCDP_TABLE_GUID;
+                       tables->efi_tables[i].table = __pa(efi.hcdp);
+                       printf(" HCDP=0x%lx",tables->efi_tables[i].table);
                        i++;
                }
                printf("\n");
        } else {
                printf("DomainU EFI build up:");
-               i = 1;
-
-               if ((unsigned long)fw_mem + fw_mem_size - (unsigned long)cp >=
-                   sizeof(struct fake_acpi_tables)) {
-                       struct fake_acpi_tables *acpi_tables;
-
-                       acpi_tables = (void *)cp;
-                       cp += sizeof(struct fake_acpi_tables);
-                       dom_fw_fake_acpi(d, acpi_tables);
-
-                       efi_tables[i].guid = ACPI_20_TABLE_GUID;
-                       efi_tables[i].table = dom_pa((unsigned long) 
acpi_tables);
-                       printf(" ACPI 2.0=0x%lx",efi_tables[i].table);
-                       i++;
-               }
+
+               tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
+               tables->efi_tables[i].table = FW_ACPI_BASE_PADDR;
+               printf(" ACPI 2.0=0x%lx",tables->efi_tables[i].table);
+               i++;
                printf("\n");
        }
 
        /* fill in the SAL system table: */
-       memcpy(sal_systab->signature, "SST_", 4);
-       sal_systab->size = sizeof(*sal_systab);
-       sal_systab->sal_rev_minor = 1;
-       sal_systab->sal_rev_major = 0;
-       sal_systab->entry_count = 2;
-
-       strcpy((char *)sal_systab->oem_id, "Xen/ia64");
-       strcpy((char *)sal_systab->product_id, "Xen/ia64");
-
-       /* fill in an entry point: */
-       sal_ed->type = SAL_DESC_ENTRY_POINT;
-       sal_ed->pal_proc = FW_HYPERCALL_PAL_CALL_PADDR + start_mpaddr;
-       dom_fw_pal_hypercall_patch (d, sal_ed->pal_proc);
-       sal_ed->sal_proc = FW_HYPERCALL_SAL_CALL_PADDR + start_mpaddr;
-       dom_fw_hypercall_patch (d, sal_ed->sal_proc, FW_HYPERCALL_SAL_CALL, 1);
-       sal_ed->gp = 0;  // will be ignored
+       memcpy(tables->sal_systab.signature, "SST_", 4);
+       tables->sal_systab.size = sizeof(tables->sal_systab);
+       tables->sal_systab.sal_rev_minor = 1;
+       tables->sal_systab.sal_rev_major = 0;
+       tables->sal_systab.entry_count = 2;
+
+       strcpy((char *)tables->sal_systab.oem_id, "Xen/ia64");
+       strcpy((char *)tables->sal_systab.product_id, "Xen/ia64");
+
+       /* PAL entry point: */
+       tables->sal_ed.type = SAL_DESC_ENTRY_POINT;
+       tables->sal_ed.pal_proc = FW_HYPERCALL_PAL_CALL_PADDR;
+       dom_fw_pal_hypercall_patch(d, tables->sal_ed.pal_proc, 
+                                  hypercalls_imva);
+       /* SAL entry point.  */
+       tables->sal_ed.sal_proc = FW_HYPERCALL_SAL_CALL_PADDR;
+       dom_fw_hypercall_patch(d, tables->sal_ed.sal_proc,
+                              FW_HYPERCALL_SAL_CALL, 1, hypercalls_imva);
+       tables->sal_ed.gp = 0;  /* will be ignored */
 
        /* Fill an AP wakeup descriptor.  */
-       sal_wakeup->type = SAL_DESC_AP_WAKEUP;
-       sal_wakeup->mechanism = IA64_SAL_AP_EXTERNAL_INT;
-       sal_wakeup->vector = XEN_SAL_BOOT_RENDEZ_VEC;
+       tables->sal_wakeup.type = SAL_DESC_AP_WAKEUP;
+       tables->sal_wakeup.mechanism = IA64_SAL_AP_EXTERNAL_INT;
+       tables->sal_wakeup.vector = XEN_SAL_BOOT_RENDEZ_VEC;
 
        /* Compute checksum.  */
-       for (cp = (char *) sal_systab; cp < (char *) efi_memmap; ++cp)
+       checksum = 0;
+       for (cp = (char *)&tables->sal_systab;
+            cp < (char *)&tables->fpswa_inf;
+            ++cp)
                checksum += *cp;
-       sal_systab->checksum = -checksum;
+       tables->sal_systab.checksum = -checksum;
 
        /* SAL return point.  */
-       d->arch.sal_return_addr = FW_HYPERCALL_SAL_RETURN_PADDR + start_mpaddr;
-       dom_fw_hypercall_patch (d, d->arch.sal_return_addr,
-                               FW_HYPERCALL_SAL_RETURN, 0);
+       dom_fw_hypercall_patch(d, FW_HYPERCALL_SAL_RETURN_PADDR,
+                              FW_HYPERCALL_SAL_RETURN, 0, hypercalls_imva);
 
        /* Fill in the FPSWA interface: */
-       fpswa_inf->revision = fpswa_interface->revision;
-       dom_fpswa_hypercall_patch(d);
-       fpswa_inf->fpswa = (void *) FW_HYPERCALL_FPSWA_ENTRY_PADDR + 
start_mpaddr;
+       tables->fpswa_inf.revision = fpswa_interface->revision;
+       dom_fpswa_hypercall_patch(d, hypercalls_imva);
+       tables->fpswa_inf.fpswa = (void *)FW_HYPERCALL_FPSWA_ENTRY_PADDR;
 
        i = 0; /* Used by MAKE_MD */
 
-       /* Create dom0/domu md entry for fw_mem area */
-       MAKE_MD(EFI_ACPI_RECLAIM_MEMORY, EFI_MEMORY_WB | EFI_MEMORY_RUNTIME,
-               dom_pa((unsigned long)fw_mem),
-               dom_pa((unsigned long)fw_mem + fw_mem_size), 1);
-
-       if (d == dom0) {
-               /* hypercall patches live here, masquerade as reserved PAL 
memory */
-               
MAKE_MD(EFI_PAL_CODE,EFI_MEMORY_WB|EFI_MEMORY_RUNTIME,HYPERCALL_START,HYPERCALL_END,
 0);
-
-               /* pass through the I/O port space */
-               if (!running_on_sim) {
-                       struct dom0_passthrough_arg arg;
-                       arg.md = &efi_memmap[i];
-                       arg.i = &i;
-                       arg.d = d;
-                       arg.flags = ASSIGN_writable;
-                       //XXX Is this needed?
-                       efi_memmap_walk_type(EFI_RUNTIME_SERVICES_CODE,
-                                            dom_fw_dom0_passthrough, &arg);
-                       // for ACPI table.
-                       arg.flags = ASSIGN_readonly;
-                       efi_memmap_walk_type(EFI_RUNTIME_SERVICES_DATA,
-                                            dom_fw_dom0_passthrough, &arg);
-                       arg.flags = ASSIGN_writable;
-                       efi_memmap_walk_type(EFI_ACPI_RECLAIM_MEMORY,
-                                            dom_fw_dom0_passthrough, &arg);
-                       efi_memmap_walk_type(EFI_ACPI_MEMORY_NVS,
-                                            dom_fw_dom0_passthrough, &arg);
-                       efi_memmap_walk_type(EFI_RESERVED_TYPE,
-                                            dom_fw_dom0_passthrough, &arg);
-                       efi_memmap_walk_type(EFI_MEMORY_MAPPED_IO,
-                                            dom_fw_dom0_passthrough, &arg);
-                       efi_memmap_walk_type(EFI_MEMORY_MAPPED_IO_PORT_SPACE,
-                                            dom_fw_dom0_passthrough, &arg);
-                       efi_memmap_walk_type(EFI_CONVENTIONAL_MEMORY,
-                                            dom_fw_dom0_lowmem, &arg);
-               }
-               else MAKE_MD(EFI_RESERVED_TYPE,0,0,0,0);
-       } else {
-               /* hypercall patches live here, masquerade as reserved
-                  PAL memory */
-               MAKE_MD(EFI_PAL_CODE, EFI_MEMORY_WB | EFI_MEMORY_RUNTIME,
-                       HYPERCALL_START, HYPERCALL_END, 1);
+       /* hypercall patches live here, masquerade as reserved PAL memory */
+       MAKE_MD(EFI_PAL_CODE,EFI_MEMORY_WB|EFI_MEMORY_RUNTIME,
+               FW_HYPERCALL_BASE_PADDR, FW_HYPERCALL_END_PADDR);
+
+       /* Create dom0/domu md entry for fw and cpi tables area.  */
+       MAKE_MD(EFI_ACPI_MEMORY_NVS, EFI_MEMORY_WB | EFI_MEMORY_RUNTIME,
+               FW_ACPI_BASE_PADDR, FW_ACPI_END_PADDR);
+       MAKE_MD(EFI_RUNTIME_SERVICES_DATA, EFI_MEMORY_WB | EFI_MEMORY_RUNTIME,
+               FW_TABLES_BASE_PADDR, FW_TABLES_END_PADDR);
+
+       if (d != dom0 || running_on_sim) {
+               /* DomU (or hp-ski).
+                  Create a continuous memory area.  */
+               /* Memory.  */
+               MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
+                       FW_END_PADDR, maxmem);
+               
                /* Create an entry for IO ports.  */
                MAKE_MD(EFI_MEMORY_MAPPED_IO_PORT_SPACE, EFI_MEMORY_UC,
-                       IO_PORTS_PADDR, IO_PORTS_PADDR + IO_PORTS_SIZE, 1);
-               MAKE_MD(EFI_RESERVED_TYPE,0,0,0,0);
-       }
-
-       // simple
-       // MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
-       //         HYPERCALL_END, maxmem, 0);
-       // is not good. Check overlap.
-       sort(efi_memmap, i, sizeof(efi_memory_desc_t),
-            efi_mdt_cmp, NULL);
-
-       // find gap and fill it with conventional memory
-       num_mds = i;
-       for (j = 0; j < num_mds; j++) {
-               unsigned long end;
-               unsigned long next_start;
-
-               md = &efi_memmap[j];
-               end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-
-               next_start = maxmem;
-               if (j + 1 < num_mds) {
-                       efi_memory_desc_t* next_md = &efi_memmap[j + 1];
-                       next_start = next_md->phys_addr;
-                       BUG_ON(end > next_start);
-                       if (end == next_md->phys_addr)
-                               continue;
-               }
-
-               // clip the range and align to PAGE_SIZE
-               // Avoid "legacy" low memory addresses and the
-               // HYPERCALL patch area.      
-               if (end < HYPERCALL_END)
-                       end = HYPERCALL_END;
-               if (next_start > maxmem)
-                       next_start = maxmem;
-               end = PAGE_ALIGN(end);
-               next_start = next_start & PAGE_MASK;
-               if (end >= next_start)
-                       continue;
-
-               MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
-                       end, next_start, 0);
-               if (next_start >= maxmem)
-                       break;
-       }
-       sort(efi_memmap, i, sizeof(efi_memory_desc_t), efi_mdt_cmp, NULL);
-
-       bp->efi_systab = dom_pa((unsigned long) fw_mem);
-       bp->efi_memmap = dom_pa((unsigned long) efi_memmap);
-       BUG_ON(i > NUM_MEM_DESCS);
-       bp->efi_memmap_size = i * sizeof(efi_memory_desc_t);
+                       IO_PORTS_PADDR, IO_PORTS_PADDR + IO_PORTS_SIZE);
+
+               num_mds = i;
+       }
+       else {
+               /* Dom0.
+                  We must preserve ACPI data from real machine,
+                  as well as IO areas.  */
+               num_mds = complete_dom0_memmap(d, tables, maxmem, i);
+       }
+
+       /* Display memmap.  */
+       for (i = 0 ; i < num_mds; i++)
+               print_md(&tables->efi_memmap[i]);
+
+       /* Fill boot_param  */
+       bp->efi_systab = FW_FIELD_MPA(efi_systab);
+       bp->efi_memmap = FW_FIELD_MPA(efi_memmap);
+       bp->efi_memmap_size = num_mds * sizeof(efi_memory_desc_t);
        bp->efi_memdesc_size = sizeof(efi_memory_desc_t);
        bp->efi_memdesc_version = EFI_MEMDESC_VERSION;
        bp->command_line = 0;
@@ -875,49 +867,44 @@ dom_fw_init (struct domain *d, struct ia
        bp->console_info.num_rows = 25;
        bp->console_info.orig_x = 0;
        bp->console_info.orig_y = 24;
-       bp->fpswa = dom_pa((unsigned long) fpswa_inf);
-       if (d == dom0) {
-               int j;
-               u64 addr;
-
-               // dom0 doesn't need build_physmap_table()
-               // see arch_set_info_guest()
-               // instead we allocate pages manually.
-               for (j = 0; j < i; j++) {
-                       md = &efi_memmap[j];
-                       if (md->phys_addr > maxmem)
-                               break;
-
-                       if (md->type == EFI_LOADER_DATA ||
-                           md->type == EFI_PAL_CODE ||
-                           md->type == EFI_CONVENTIONAL_MEMORY) {
-                               unsigned long start = md->phys_addr & PAGE_MASK;
-                               unsigned long end = md->phys_addr +
-                                             (md->num_pages << EFI_PAGE_SHIFT);
-
-                               if (end == start) {
-                                       // md->num_pages = 0 is allowed.
-                                       end += PAGE_SIZE;
-                               }
-                               if (end > (max_page << PAGE_SHIFT))
-                                       end = (max_page << PAGE_SHIFT);
-
-                               for (addr = start; addr < end; addr += 
PAGE_SIZE) {
-                                       assign_new_domain0_page(d, addr);
-                               }
-                       }
-               }
-               // Map low-memory holes & unmapped MMIO for legacy drivers
-               for (addr = 0; addr < 1*MB; addr += PAGE_SIZE) {
-                       if (domain_page_mapped(d, addr))
-                               continue;
-                                       
-                       if (efi_mmio(addr, PAGE_SIZE))
-                               assign_domain_mmio_page(d, addr, PAGE_SIZE);
-               }
-       }
-       for (i = 0 ; i < bp->efi_memmap_size/sizeof(efi_memory_desc_t) ; i++) {
-               md = efi_memmap + i;
-               print_md(md);
-       }
-}
+       bp->fpswa = FW_FIELD_MPA(fpswa_inf);
+}
+
+void dom_fw_setup(struct domain *d, unsigned long bp_mpa, unsigned long maxmem)
+{
+       struct ia64_boot_param *bp;
+       unsigned long imva_tables_base;
+       unsigned long imva_hypercall_base;
+
+       BUILD_BUG_ON(sizeof(struct fw_tables) >
+                    (FW_TABLES_END_PADDR - FW_TABLES_BASE_PADDR));
+
+       BUILD_BUG_ON(sizeof(struct fake_acpi_tables) >
+                    (FW_ACPI_END_PADDR - FW_ACPI_BASE_PADDR));
+
+       /* Create page for hypercalls.  */
+       assign_new_domain_page_if_dom0(d, FW_HYPERCALL_BASE_PADDR);
+       imva_hypercall_base = (unsigned long)domain_mpa_to_imva
+                                            (d, FW_HYPERCALL_BASE_PADDR);
+
+       /* Create page for acpi tables.  */
+       if (d != dom0) {
+               void *imva;
+
+               assign_new_domain_page_if_dom0(d, FW_ACPI_BASE_PADDR);
+               imva = domain_mpa_to_imva (d, FW_ACPI_BASE_PADDR);
+               dom_fw_fake_acpi(d, (struct fake_acpi_tables *)imva);
+       }
+
+       /* Create page for FW tables.  */
+       assign_new_domain_page_if_dom0(d, FW_TABLES_BASE_PADDR);
+       imva_tables_base = (unsigned long)domain_mpa_to_imva
+                                         (d, FW_TABLES_BASE_PADDR);
+
+       /* Create page for boot_param.  */
+       assign_new_domain_page_if_dom0(d, bp_mpa);
+       bp = domain_mpa_to_imva(d, bp_mpa);
+
+       dom_fw_init(d, bp, (struct fw_tables *)imva_tables_base,
+                   imva_hypercall_base, maxmem);
+}
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/domain.c        Wed Aug 30 22:36:18 2006 +0100
@@ -46,7 +46,6 @@
 #include <asm/regionreg.h>
 #include <asm/dom_fw.h>
 #include <asm/shadow.h>
-#include <asm/privop_stat.h>
 
 unsigned long dom0_size = 512*1024*1024;
 unsigned long dom0_align = 64*1024*1024;
@@ -111,6 +110,8 @@ void schedule_tail(struct vcpu *prev)
 
        if (VMX_DOMAIN(current)) {
                vmx_do_launch(current);
+               migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
+                             current->processor);
        } else {
                ia64_set_iva(&ia64_ivt);
                ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
@@ -121,6 +122,7 @@ void schedule_tail(struct vcpu *prev)
                  shared_info->vcpu_info[current->vcpu_id].evtchn_upcall_mask;
                __ia64_per_cpu_var(current_psr_ic_addr) = (int *)
                  (current->domain->arch.shared_info_va + XSI_PSR_IC_OFS);
+               migrate_timer(&current->arch.hlt_timer, current->processor);
        }
        flush_vtlb_for_context_switch(current);
 }
@@ -134,10 +136,18 @@ void context_switch(struct vcpu *prev, s
 
     __ia64_save_fpu(prev->arch._thread.fph);
     __ia64_load_fpu(next->arch._thread.fph);
-    if (VMX_DOMAIN(prev))
-           vmx_save_state(prev);
+    if (VMX_DOMAIN(prev)) {
+       vmx_save_state(prev);
+       if (!VMX_DOMAIN(next)) {
+           /* VMX domains can change the physical cr.dcr.
+            * Restore default to prevent leakage. */
+           ia64_setreg(_IA64_REG_CR_DCR, (IA64_DCR_DP | IA64_DCR_DK
+                          | IA64_DCR_DX | IA64_DCR_DR | IA64_DCR_PP
+                          | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
+       }
+    }
     if (VMX_DOMAIN(next))
-           vmx_load_state(next);
+       vmx_load_state(next);
     /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
     prev = ia64_switch_to(next);
 
@@ -147,6 +157,8 @@ void context_switch(struct vcpu *prev, s
  
     if (VMX_DOMAIN(current)){
        vmx_load_all_rr(current);
+       migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
+                     current->processor);
     } else {
        struct domain *nd;
        extern char ia64_ivt;
@@ -228,6 +240,12 @@ void startup_cpu_idle_loop(void)
 # error "XMAPPEDREGS_SHIFT doesn't match sizeof(mapped_regs_t)."
 #endif
 
+void hlt_timer_fn(void *data)
+{
+       struct vcpu *v = data;
+       vcpu_unblock(v);
+}
+
 struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
 {
        struct vcpu *v;
@@ -287,6 +305,10 @@ struct vcpu *alloc_vcpu_struct(struct do
            v->arch.breakimm = d->arch.breakimm;
            v->arch.last_processor = INVALID_PROCESSOR;
        }
+       if (!VMX_DOMAIN(v)){
+               init_timer(&v->arch.hlt_timer, hlt_timer_fn, v,
+                          first_cpu(cpu_online_map));
+       }
 
        return v;
 }
@@ -298,6 +320,7 @@ void relinquish_vcpu_resources(struct vc
                            get_order_from_shift(XMAPPEDREGS_SHIFT));
         v->arch.privregs = NULL;
     }
+    kill_timer(&v->arch.hlt_timer);
 }
 
 void free_vcpu_struct(struct vcpu *v)
@@ -532,6 +555,9 @@ void domain_relinquish_resources(struct 
     // relase page traversing d->arch.mm.
     relinquish_mm(d);
 
+    if (d->vcpu[0] && VMX_DOMAIN(d->vcpu[0]))
+           vmx_relinquish_guest_resources(d);
+
     relinquish_memory(d, &d->xenpage_list);
     relinquish_memory(d, &d->page_list);
 
@@ -591,7 +617,7 @@ domain_set_shared_info_va (unsigned long
 /* Transfer and clear the shadow bitmap in 1kB chunks for L1 cache. */
 #define SHADOW_COPY_CHUNK (1024 / sizeof (unsigned long))
 
-int shadow_mode_control(struct domain *d, xen_domctl_shadow_ops_t *sc)
+int shadow_mode_control(struct domain *d, xen_domctl_shadow_op_t *sc)
 {
        unsigned int op = sc->op;
        int          rc = 0;
@@ -716,6 +742,15 @@ int shadow_mode_control(struct domain *d
                }
                break;
        }
+       case XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
+               sc->mb = 0;
+               break;
+       case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
+               if (sc->mb > 0) {
+                       BUG();
+                       rc = -ENOMEM;
+               }
+               break;
        default:
                rc = -EINVAL;
                break;
@@ -1082,13 +1117,15 @@ void machine_restart(char * __unused)
        while(1);
 }
 
+extern void cpu_halt(void);
+
 void machine_halt(void)
 {
        console_start_sync();
        if (running_on_sim)
                printf ("machine_halt called.  spinning...\n");
        else
-               (*efi.reset_system)(EFI_RESET_SHUTDOWN,0,0,NULL);
+               cpu_halt();
        while(1);
 }
 
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/faults.c
--- a/xen/arch/ia64/xen/faults.c        Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/faults.c        Wed Aug 30 22:36:18 2006 +0100
@@ -13,6 +13,8 @@
 #include <xen/smp.h>
 #include <asm/ptrace.h>
 #include <xen/delay.h>
+#include <xen/perfc.h>
+#include <xen/mm.h>
 
 #include <asm/system.h>
 #include <asm/processor.h>
@@ -26,9 +28,9 @@
 #include <asm/debugger.h>
 #include <asm/fpswa.h>
 #include <asm/bundle.h>
-#include <asm/privop_stat.h>
 #include <asm/asm-xsi-offsets.h>
 #include <asm/shadow.h>
+#include <asm/uaccess.h>
 
 extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
 /* FIXME: where these declarations shold be there ? */
@@ -516,7 +518,8 @@ ia64_handle_break (unsigned long ifa, st
                debugger_trap_fatal(0 /* don't care */, regs);
        } 
 #endif
-       else if (iim == d->arch.breakimm) {
+       else if (iim == d->arch.breakimm &&
+                ia64_get_cpl(regs->cr_ipsr) == 2) {
                /* by default, do not continue */
                v->arch.hypercall_continuation = 0;
 
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/flushd.S
--- a/xen/arch/ia64/xen/flushd.S        Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/flushd.S        Wed Aug 30 22:36:18 2006 +0100
@@ -16,8 +16,9 @@
         *
         *      Flush cache.
         *
-        *      Must deal with range from start to end-1 but nothing else (need to
-        *      be careful not to touch addresses that may be unmapped).
+        *      Must deal with range from start to end-1 but nothing else 
+        *      (need to be careful not to touch addresses that may be 
+        *      unmapped).
         *
         *      Note: "in0" and "in1" are preserved for debugging purposes.
         */
@@ -37,7 +38,8 @@ GLOBAL_ENTRY(flush_dcache_range)
        ;;
        sub     r8=r22,r23              // number of strides - 1
        shl     r24=r23,r20             // r24: addresses for "fc" =
-                                       //      "start" rounded down to stride boundary
+                                       //      "start" rounded down to stride 
+                                       //      boundary
        .save   ar.lc,r3
        mov     r3=ar.lc                // save ar.lc
        ;;
@@ -49,7 +51,8 @@ GLOBAL_ENTRY(flush_dcache_range)
         * 32 byte aligned loop, even number of (actually 2) bundles
         */
 .Loop: fc      r24                     // issuable on M0 only
-       add     r24=r21,r24             // we flush "stride size" bytes per iteration
+       add     r24=r21,r24             // we flush "stride size" bytes per
+                                       //   iteration
        nop.i   0
        br.cloop.sptk.few .Loop
        ;;
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/fw_emul.c
--- a/xen/arch/ia64/xen/fw_emul.c       Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/fw_emul.c       Wed Aug 30 22:36:18 2006 +0100
@@ -28,6 +28,7 @@
 #include "hpsim_ssc.h"
 #include <asm/vcpu.h>
 #include <asm/dom_fw.h>
+#include <asm/uaccess.h>
 
 extern unsigned long running_on_sim;
 
@@ -420,6 +421,141 @@ efi_emulate_get_time(
 }
 
 static efi_status_t
+efi_emulate_get_variable(
+       unsigned long name_addr, unsigned long vendor_addr,
+       unsigned long attr_addr, unsigned long data_size_addr,
+       unsigned long data_addr, IA64FAULT *fault)
+{
+       unsigned long name, vendor, attr = 0, data_size, data;
+       struct page_info *name_page = NULL, *vendor_page = NULL,
+                        *attr_page = NULL, *data_size_page = NULL,
+                        *data_page = NULL;
+       efi_status_t status = 0;
+
+       if (current->domain != dom0)
+               return EFI_UNSUPPORTED;
+
+       name = efi_translate_domain_addr(name_addr, fault, &name_page);
+       if (*fault != IA64_NO_FAULT)
+               goto errout;
+       vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
+       if (*fault != IA64_NO_FAULT)
+               goto errout;
+       data_size = efi_translate_domain_addr(data_size_addr, fault,
+                                             &data_size_page);
+       if (*fault != IA64_NO_FAULT)
+               goto errout;
+       data = efi_translate_domain_addr(data_addr, fault, &data_page);
+       if (*fault != IA64_NO_FAULT)
+               goto errout;
+       if (attr_addr) {
+               attr = efi_translate_domain_addr(attr_addr, fault, &attr_page);
+               if (*fault != IA64_NO_FAULT)
+                       goto errout;
+       }
+
+       status = (*efi.get_variable)((efi_char16_t *)name,
+                                    (efi_guid_t *)vendor,
+                                    (u32 *)attr,
+                                    (unsigned long *)data_size,
+                                    (void *)data);
+
+errout:
+       if (name_page != NULL)
+               put_page(name_page);
+       if (vendor_page != NULL)
+               put_page(vendor_page);
+       if (attr_page != NULL)
+               put_page(attr_page);
+       if (data_size_page != NULL)
+               put_page(data_size_page);
+       if (data_page != NULL)
+               put_page(data_page);
+
+       return status;
+}
+
+static efi_status_t
+efi_emulate_get_next_variable(
+       unsigned long name_size_addr, unsigned long name_addr,
+       unsigned long vendor_addr, IA64FAULT *fault)
+{
+       unsigned long name_size, name, vendor;
+       struct page_info *name_size_page = NULL, *name_page = NULL,
+                        *vendor_page = NULL;
+       efi_status_t status = 0;
+
+       if (current->domain != dom0)
+               return EFI_UNSUPPORTED;
+
+       name_size = efi_translate_domain_addr(name_size_addr, fault,
+                                             &name_size_page);
+       if (*fault != IA64_NO_FAULT)
+               goto errout;
+       name = efi_translate_domain_addr(name_addr, fault, &name_page);
+       if (*fault != IA64_NO_FAULT)
+               goto errout;
+       vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
+       if (*fault != IA64_NO_FAULT)
+               goto errout;
+
+       status = (*efi.get_next_variable)((unsigned long *)name_size,
+                                         (efi_char16_t *)name,
+                                         (efi_guid_t *)vendor);
+
+errout:
+       if (name_size_page != NULL)
+               put_page(name_size_page);
+       if (name_page != NULL)
+               put_page(name_page);
+       if (vendor_page != NULL)
+               put_page(vendor_page);
+
+       return status;
+}
+
+static efi_status_t
+efi_emulate_set_variable(
+       unsigned long name_addr, unsigned long vendor_addr, 
+       unsigned long attr, unsigned long data_size, 
+       unsigned long data_addr, IA64FAULT *fault)
+{
+       unsigned long name, vendor, data;
+       struct page_info *name_page = NULL, *vendor_page = NULL,
+                        *data_page = NULL;
+       efi_status_t status = 0;
+
+       if (current->domain != dom0)
+               return EFI_UNSUPPORTED;
+
+       name = efi_translate_domain_addr(name_addr, fault, &name_page);
+       if (*fault != IA64_NO_FAULT)
+               goto errout;
+       vendor = efi_translate_domain_addr(vendor_addr, fault, &vendor_page);
+       if (*fault != IA64_NO_FAULT)
+               goto errout;
+       data = efi_translate_domain_addr(data_addr, fault, &data_page);
+       if (*fault != IA64_NO_FAULT)
+               goto errout;
+
+       status = (*efi.set_variable)((efi_char16_t *)name,
+                                    (efi_guid_t *)vendor,
+                                    attr,
+                                    data_size,
+                                    (void *)data);
+
+errout:
+       if (name_page != NULL)
+               put_page(name_page);
+       if (vendor_page != NULL)
+               put_page(vendor_page);
+       if (data_page != NULL)
+               put_page(data_page);
+
+       return status;
+}
+
+static efi_status_t
 efi_emulate_set_virtual_address_map(
        unsigned long memory_map_size, unsigned long descriptor_size,
        u32 descriptor_version, efi_memory_desc_t *virtual_map)
@@ -527,6 +663,31 @@ efi_emulator (struct pt_regs *regs, IA64
                                vcpu_get_gr(v,33),
                                fault);
                break;
+           case FW_HYPERCALL_EFI_GET_VARIABLE:
+               status = efi_emulate_get_variable (
+                               vcpu_get_gr(v,32),
+                               vcpu_get_gr(v,33),
+                               vcpu_get_gr(v,34),
+                               vcpu_get_gr(v,35),
+                               vcpu_get_gr(v,36),
+                               fault);
+               break;
+           case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
+               status = efi_emulate_get_next_variable (
+                               vcpu_get_gr(v,32),
+                               vcpu_get_gr(v,33),
+                               vcpu_get_gr(v,34),
+                               fault);
+               break;
+           case FW_HYPERCALL_EFI_SET_VARIABLE:
+               status = efi_emulate_set_variable (
+                               vcpu_get_gr(v,32),
+                               vcpu_get_gr(v,33),
+                               vcpu_get_gr(v,34),
+                               vcpu_get_gr(v,35),
+                               vcpu_get_gr(v,36),
+                               fault);
+               break;
            case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
                status = efi_emulate_set_virtual_address_map (
                                vcpu_get_gr(v,32),
@@ -538,10 +699,6 @@ efi_emulator (struct pt_regs *regs, IA64
            case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
            case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
                // FIXME: need fixes in efi.h from 2.6.9
-           case FW_HYPERCALL_EFI_GET_VARIABLE:
-               // FIXME: need fixes in efi.h from 2.6.9
-           case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
-           case FW_HYPERCALL_EFI_SET_VARIABLE:
            case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
                // FIXME: need fixes in efi.h from 2.6.9
                status = EFI_UNSUPPORTED;
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c     Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/hypercall.c     Wed Aug 30 22:36:18 2006 +0100
@@ -11,6 +11,7 @@
 #include <xen/hypercall.h>
 #include <xen/multicall.h>
 #include <xen/guest_access.h>
+#include <xen/mm.h>
 
 #include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */
 #include <asm/sal.h>   /* FOR struct ia64_sal_retval */
@@ -29,45 +30,45 @@
 #include <xen/domain.h>
 #include <public/callback.h>
 #include <xen/event.h>
-#include <asm/privop_stat.h>
+#include <xen/perfc.h>
 
 static long do_physdev_op_compat(XEN_GUEST_HANDLE(physdev_op_t) uop);
 static long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg);
 static long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg);
 
-hypercall_t ia64_hypercall_table[] =
-       {
-       (hypercall_t)do_ni_hypercall,           /* do_set_trap_table */         /*  0 */
+const hypercall_t ia64_hypercall_table[NR_hypercalls] =
+{
+       (hypercall_t)do_ni_hypercall,           /* do_set_trap_table *//*  0 */
        (hypercall_t)do_ni_hypercall,           /* do_mmu_update */
        (hypercall_t)do_ni_hypercall,           /* do_set_gdt */
        (hypercall_t)do_ni_hypercall,           /* do_stack_switch */
        (hypercall_t)do_ni_hypercall,           /* do_set_callbacks */
-       (hypercall_t)do_ni_hypercall,           /* do_fpu_taskswitch */         /*  5 */
+       (hypercall_t)do_ni_hypercall,           /* do_fpu_taskswitch *//*  5 */
        (hypercall_t)do_sched_op_compat,
        (hypercall_t)do_ni_hypercall,
        (hypercall_t)do_ni_hypercall,           /* do_set_debugreg */
        (hypercall_t)do_ni_hypercall,           /* do_get_debugreg */
-       (hypercall_t)do_ni_hypercall,           /* do_update_descriptor */      /* 10 */
+       (hypercall_t)do_ni_hypercall,           /* do_update_descriptor * 10 */
        (hypercall_t)do_ni_hypercall,           /* do_ni_hypercall */
        (hypercall_t)do_memory_op,
        (hypercall_t)do_multicall,
        (hypercall_t)do_ni_hypercall,           /* do_update_va_mapping */
-       (hypercall_t)do_ni_hypercall,           /* do_set_timer_op */           /* 15 */
+       (hypercall_t)do_ni_hypercall,           /* do_set_timer_op */  /* 15 */
        (hypercall_t)do_event_channel_op_compat,
        (hypercall_t)do_xen_version,
        (hypercall_t)do_console_io,
        (hypercall_t)do_physdev_op_compat,
-       (hypercall_t)do_grant_table_op,                                         /* 20 */
+       (hypercall_t)do_grant_table_op,                                /* 20 */
        (hypercall_t)do_ni_hypercall,           /* do_vm_assist */
-       (hypercall_t)do_ni_hypercall,           /* do_update_va_mapping_otherdomain */
+       (hypercall_t)do_ni_hypercall,           /* do_update_va_mapping_othe */
        (hypercall_t)do_ni_hypercall,           /* (x86 only) */
        (hypercall_t)do_ni_hypercall,           /* do_vcpu_op */
-       (hypercall_t)do_ni_hypercall,           /* (x86_64 only) */             /* 25 */
+       (hypercall_t)do_ni_hypercall,           /* (x86_64 only) */    /* 25 */
        (hypercall_t)do_ni_hypercall,           /* do_mmuext_op */
        (hypercall_t)do_ni_hypercall,           /* do_acm_op */
        (hypercall_t)do_ni_hypercall,           /* do_nmi_op */
        (hypercall_t)do_sched_op,
-       (hypercall_t)do_callback_op,            /*  */                  /* 30 */
+       (hypercall_t)do_callback_op,            /*  */                 /* 30 */
        (hypercall_t)do_ni_hypercall,           /*  */
        (hypercall_t)do_event_channel_op,
        (hypercall_t)do_physdev_op,
@@ -77,33 +78,52 @@ hypercall_t ia64_hypercall_table[] =
        (hypercall_t)do_ni_hypercall,           /*  */
        (hypercall_t)do_ni_hypercall,           /*  */
        (hypercall_t)do_ni_hypercall,           /*  */
-       (hypercall_t)do_ni_hypercall,           /*  */                  /* 40 */
-       (hypercall_t)do_ni_hypercall,           /*  */
-       (hypercall_t)do_ni_hypercall,           /*  */
-       (hypercall_t)do_ni_hypercall,           /*  */
-       (hypercall_t)do_ni_hypercall,           /*  */
-       (hypercall_t)do_ni_hypercall,           /*  */                  /* 45 */
-       (hypercall_t)do_ni_hypercall,           /*  */
-       (hypercall_t)do_ni_hypercall,           /*  */
-       (hypercall_t)do_dom0vp_op,                      /* dom0vp_op */
+       (hypercall_t)do_ni_hypercall,           /*  */                 /* 40 */
+       (hypercall_t)do_ni_hypercall,           /*  */
+       (hypercall_t)do_ni_hypercall,           /*  */
+       (hypercall_t)do_ni_hypercall,           /*  */
+       (hypercall_t)do_ni_hypercall,           /*  */
+       (hypercall_t)do_ni_hypercall,           /*  */                 /* 45 */
+       (hypercall_t)do_ni_hypercall,           /*  */
+       (hypercall_t)do_ni_hypercall,           /*  */
+       (hypercall_t)do_dom0vp_op,              /* dom0vp_op */
        (hypercall_t)do_ni_hypercall,           /* arch_1 */
-       (hypercall_t)do_ni_hypercall,           /* arch_2 */            /* 50 */
+       (hypercall_t)do_ni_hypercall,           /* arch_2 */           /* 50 */
        (hypercall_t)do_ni_hypercall,           /* arch_3 */
        (hypercall_t)do_ni_hypercall,           /* arch_4 */
        (hypercall_t)do_ni_hypercall,           /* arch_5 */
        (hypercall_t)do_ni_hypercall,           /* arch_6 */
-       (hypercall_t)do_ni_hypercall            /* arch_7 */            /* 55 */
-       };
-
-uint32_t nr_hypercalls =
-       sizeof(ia64_hypercall_table) / sizeof(hypercall_t);
+       (hypercall_t)do_ni_hypercall,           /* arch_7 */           /* 55 */
+       (hypercall_t)do_ni_hypercall,
+       (hypercall_t)do_ni_hypercall,
+       (hypercall_t)do_ni_hypercall,
+       (hypercall_t)do_ni_hypercall,
+       (hypercall_t)do_ni_hypercall,                                  /* 60 */
+       (hypercall_t)do_ni_hypercall,
+       (hypercall_t)do_ni_hypercall,
+       (hypercall_t)do_ni_hypercall
+};
 
 static IA64FAULT
 xen_hypercall (struct pt_regs *regs)
 {
        uint32_t cmd = (uint32_t)regs->r2;
-
-       if (cmd < nr_hypercalls)
+       struct vcpu *v = current;
+
+       if (cmd == __HYPERVISOR_grant_table_op) {
+               XEN_GUEST_HANDLE(void) uop;
+
+               v->arch.hypercall_param.va = regs->r15;
+               v->arch.hypercall_param.pa1 = regs->r17;
+               v->arch.hypercall_param.pa2 = regs->r18;
+               set_xen_guest_handle(uop, (void *)regs->r15);
+               regs->r8 = do_grant_table_op(regs->r14, uop, regs->r16);
+               v->arch.hypercall_param.va = 0;
+               return IA64_NO_FAULT;
+       }
+
+       if (cmd < NR_hypercalls) {
+               perfc_incra(hypercalls, cmd);
                regs->r8 = (*ia64_hypercall_table[cmd])(
                        regs->r14,
                        regs->r15,
@@ -111,12 +131,11 @@ xen_hypercall (struct pt_regs *regs)
                        regs->r17,
                        regs->r18,
                        regs->r19);
-       else
+       } else
                regs->r8 = -ENOSYS;
 
        return IA64_NO_FAULT;
 }
-
 
 static void
 fw_hypercall_ipi (struct pt_regs *regs)
@@ -153,7 +172,7 @@ fw_hypercall_ipi (struct pt_regs *regs)
                vcpu_init_regs (targ);
                vcpu_regs (targ)->cr_iip = d->arch.sal_data->boot_rdv_ip;
                vcpu_regs (targ)->r1 = d->arch.sal_data->boot_rdv_r1;
-               vcpu_regs (targ)->b0 = d->arch.sal_return_addr;
+               vcpu_regs (targ)->b0 = FW_HYPERCALL_SAL_RETURN_PADDR;
 
                if (test_and_clear_bit(_VCPUF_down,
                                       &targ->vcpu_flags)) {
@@ -217,7 +236,12 @@ fw_hypercall (struct pt_regs *regs)
                        }
                        else {
                                perfc_incrc(pal_halt_light);
-                               do_sched_op_compat(SCHEDOP_yield, 0);
+                               migrate_timer(&v->arch.hlt_timer,
+                                             v->processor);
+                               set_timer(&v->arch.hlt_timer,
+                                         vcpu_get_next_timer_ns(v));
+                               do_sched_op_compat(SCHEDOP_block, 0);
+                               stop_timer(&v->arch.hlt_timer);
                        }
                        regs->r8 = 0;
                        regs->r9 = 0;
@@ -276,52 +300,10 @@ fw_hypercall (struct pt_regs *regs)
        return IA64_NO_FAULT;
 }
 
-/* opt_unsafe_hypercall: If true, unsafe debugging hypercalls are allowed.
-   These can create security hole.  */
-static int opt_unsafe_hypercall = 0;
-boolean_param("unsafe_hypercall", opt_unsafe_hypercall);
-
 IA64FAULT
 ia64_hypercall (struct pt_regs *regs)
 {
-       struct vcpu *v = current;
        unsigned long index = regs->r2;
-       int privlvl = (regs->cr_ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
-
-       if (index >= FW_HYPERCALL_FIRST_USER) {
-           /* Note: user hypercalls are not safe, since Xen doesn't
-              check memory access privilege: Xen does not deny reading
-              or writing to kernel memory.  */
-           if (!opt_unsafe_hypercall) {
-               printf("user xen/ia64 hypercalls disabled\n");
-               regs->r8 = -1;
-           }
-           else switch (index) {
-               case 0xffff:
-                       regs->r8 = dump_privop_counts_to_user(
-                               (char *) vcpu_get_gr(v,32),
-                               (int) vcpu_get_gr(v,33));
-                       break;
-               case 0xfffe:
-                       regs->r8 = zero_privop_counts_to_user(
-                               (char *) vcpu_get_gr(v,32),
-                               (int) vcpu_get_gr(v,33));
-                       break;
-               default:
-                       printf("unknown user xen/ia64 hypercall %lx\n", index);
-                       regs->r8 = do_ni_hypercall();
-           }
-           return IA64_NO_FAULT;
-       }
-
-       /* Hypercalls are only allowed by kernel.
-          Kernel checks memory accesses.  */
-       if (VMX_DOMAIN(v) ? (privlvl != 0) : (privlvl != 2)) {
-           /* FIXME: Return a better error value ?
-              Reflection ? Illegal operation ?  */
-           regs->r8 = -1;
-           return IA64_NO_FAULT;
-       }
 
        if (index >= FW_HYPERCALL_FIRST_ARCH)
            return fw_hypercall (regs);
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S   Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/hyperprivop.S   Wed Aug 30 22:36:18 2006 +0100
@@ -18,7 +18,8 @@
 
 
#define        _PAGE_PPN_MASK  0x0003fffffffff000 //asm/pgtable.h doesn't do assembly
-#define PAGE_PHYS      0x0010000000000761 //__pgprot(__DIRTY_BITS|_PAGE_PL_2|_PAGE_AR_RWX)
+#define PAGE_PHYS      0x0010000000000761 //__pgprot(__DIRTY_BITS|
+                                          //         _PAGE_PL_2|_PAGE_AR_RWX)
 #define _PAGE_PL_2     (2<<7)
 
 #if 1   // change to 0 to turn off all fast paths
@@ -32,10 +33,10 @@
        
 //#define FAST_TICK // mostly working (unat problems) but default off for now
//#define FAST_TLB_MISS_REFLECT        // mostly working but default off for now
-# undef FAST_ITC       //XXX TODO fast_itc doesn't suport dom0 vp yet.
+# undef FAST_ITC               //XXX TODO fast_itc doesn't support dom0 vp yet
 # define FAST_BREAK
-# undef FAST_ACCESS_REFLECT //XXX TODO fast_access_reflect
-                            //    doesn't support dom0 vp yet.
+# undef FAST_ACCESS_REFLECT    //XXX TODO fast_access_reflect
+                               //    doesn't support dom0 vp yet.
 # define FAST_RFI
 # define FAST_SSM_I
 # define FAST_PTC_GA
@@ -104,79 +105,100 @@ 1:       // when we get to here r20=~=interrup
 1:     // when we get to here r20=~=interrupts pending
        // Check pending event indication
 (p7)   movl r20=THIS_CPU(current_psr_i_addr);;
-(p7)   ld8 r20=[r20];;
-(p7)   adds r20=-1,r20;;       /* evtchn_upcall_pending */
-(p7)   ld1 r20=[r20];;
+(p7)   ld8 r20=[r20]
+       ;;
+(p7)   adds r20=-1,r20                         // evtchn_upcall_pending
+       ;;
+(p7)   ld1 r20=[r20]
+       ;;
 
        // HYPERPRIVOP_RFI?
        cmp.eq p7,p6=HYPERPRIVOP_RFI,r17
-(p7)   br.sptk.many hyper_rfi;;
+(p7)   br.sptk.many hyper_rfi
+       ;;
 
        // HYPERPRIVOP_GET_IVR?
        cmp.eq p7,p6=HYPERPRIVOP_GET_IVR,r17
-(p7)   br.sptk.many hyper_get_ivr;;
+(p7)   br.sptk.many hyper_get_ivr
+       ;;
 
        cmp.ne p7,p0=r20,r0
-(p7)   br.spnt.many dispatch_break_fault ;;
+(p7)   br.spnt.many dispatch_break_fault
+       ;;
 
        // HYPERPRIVOP_COVER?
        cmp.eq p7,p6=HYPERPRIVOP_COVER,r17
-(p7)   br.sptk.many hyper_cover;;
+(p7)   br.sptk.many hyper_cover
+       ;;
 
        // HYPERPRIVOP_SSM_DT?
        cmp.eq p7,p6=HYPERPRIVOP_SSM_DT,r17
-(p7)   br.sptk.many hyper_ssm_dt;;
+(p7)   br.sptk.many hyper_ssm_dt
+       ;;
 
        // HYPERPRIVOP_RSM_DT?
        cmp.eq p7,p6=HYPERPRIVOP_RSM_DT,r17
-(p7)   br.sptk.many hyper_rsm_dt;;
+(p7)   br.sptk.many hyper_rsm_dt
+       ;;
 
        // HYPERPRIVOP_GET_TPR?
        cmp.eq p7,p6=HYPERPRIVOP_GET_TPR,r17
-(p7)   br.sptk.many hyper_get_tpr;;
+(p7)   br.sptk.many hyper_get_tpr
+       ;;
 
        // HYPERPRIVOP_SET_TPR?
        cmp.eq p7,p6=HYPERPRIVOP_SET_TPR,r17
-(p7)   br.sptk.many hyper_set_tpr;;
+(p7)   br.sptk.many hyper_set_tpr
+       ;;
 
        // HYPERPRIVOP_EOI?
        cmp.eq p7,p6=HYPERPRIVOP_EOI,r17
-(p7)   br.sptk.many hyper_eoi;;
+(p7)   br.sptk.many hyper_eoi
+       ;;
 
        // HYPERPRIVOP_SET_ITM?
        cmp.eq p7,p6=HYPERPRIVOP_SET_ITM,r17
-(p7)   br.sptk.many hyper_set_itm;;
+(p7)   br.sptk.many hyper_set_itm
+       ;;
 
        // HYPERPRIVOP_SET_RR?
        cmp.eq p7,p6=HYPERPRIVOP_SET_RR,r17
-(p7)   br.sptk.many hyper_set_rr;;
+(p7)   br.sptk.many hyper_set_rr
+       ;;
 
        // HYPERPRIVOP_GET_RR?
        cmp.eq p7,p6=HYPERPRIVOP_GET_RR,r17
-(p7)   br.sptk.many hyper_get_rr;;
+(p7)   br.sptk.many hyper_get_rr
+       ;;
 
        // HYPERPRIVOP_PTC_GA?
        cmp.eq p7,p6=HYPERPRIVOP_PTC_GA,r17
-(p7)   br.sptk.many hyper_ptc_ga;;
+(p7)   br.sptk.many hyper_ptc_ga
+       ;;
 
        // HYPERPRIVOP_ITC_D?
        cmp.eq p7,p6=HYPERPRIVOP_ITC_D,r17
-(p7)   br.sptk.many hyper_itc_d;;
+(p7)   br.sptk.many hyper_itc_d
+       ;;
 
        // HYPERPRIVOP_ITC_I?
        cmp.eq p7,p6=HYPERPRIVOP_ITC_I,r17
-(p7)   br.sptk.many hyper_itc_i;;
+(p7)   br.sptk.many hyper_itc_i
+       ;;
 
        // HYPERPRIVOP_THASH?
        cmp.eq p7,p6=HYPERPRIVOP_THASH,r17
-(p7)   br.sptk.many hyper_thash;;
+(p7)   br.sptk.many hyper_thash
+       ;;
 
        // HYPERPRIVOP_SET_KR?
        cmp.eq p7,p6=HYPERPRIVOP_SET_KR,r17
-(p7)   br.sptk.many hyper_set_kr;;
+(p7)   br.sptk.many hyper_set_kr
+       ;;
 
        // if not one of the above, give up for now and do it the slow way
-       br.sptk.many dispatch_break_fault ;;
+       br.sptk.many dispatch_break_fault
+       ;;
 END(fast_hyperprivop)
 
 // give up for now if: ipsr.be==1, ipsr.pp==1
@@ -701,9 +723,9 @@ ENTRY(fast_reflect)
        .mem.offset 0,0; st8.spill [r2]=r30,16;
        .mem.offset 8,0; st8.spill [r3]=r31,16 ;;
 #ifdef HANDLE_AR_UNAT
-       // r16~r23 are preserved regsin bank0 regs, we need to restore them,
-    // r24~r31 are scratch regs, we don't need to handle NaT bit,
-    // because OS handler must assign it before access it
+       // r16~r23 are preserved regs in bank0 regs, we need to restore them,
+       // r24~r31 are scratch regs, we don't need to handle NaT bit,
+       // because OS handler must assign it before access it
        ld8 r16=[r2],16;
        ld8 r17=[r3],16;;
        ld8 r18=[r2],16;
@@ -1114,7 +1136,8 @@ just_do_rfi:
 (p7)   st4 [r18]=r19;;
 (p6)   st4 [r18]=r0;;
        // force on psr.ic, i, dt, rt, it, bn
-       movl r20=(IA64_PSR_I|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT|IA64_PSR_BN)
+       movl r20=(IA64_PSR_I|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT| \
+                 IA64_PSR_IT|IA64_PSR_BN)
        ;;
        or r21=r21,r20
        ;;
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/irq.c
--- a/xen/arch/ia64/xen/irq.c   Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/irq.c   Wed Aug 30 22:36:18 2006 +0100
@@ -40,7 +40,6 @@
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/bitops.h>
-#include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/delay.h>
 #include <xen/irq.h>
@@ -236,9 +235,6 @@ int setup_vector(unsigned int irq, struc
        struct irqaction *old, **p;
        irq_desc_t *desc = irq_descp(irq);
 
-       printf ("setup_vector(%d): handler=%p, flags=%x\n",
-               irq, desc->handler, desc->status);
-
        /*
         * The following block of code has to be executed atomically
         */
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/ivt.S
--- a/xen/arch/ia64/xen/ivt.S   Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/ivt.S   Wed Aug 30 22:36:18 2006 +0100
@@ -15,7 +15,8 @@
  *      Fenghua Yu <fenghua.yu@xxxxxxxxx>
  *
  * 00/08/23 Asit Mallick <asit.k.mallick@xxxxxxxxx> TLB handling for SMP
- * 00/12/20 David Mosberger-Tang <davidm@xxxxxxxxxx> DTLB/ITLB handler now uses virtual PT.
+ * 00/12/20 David Mosberger-Tang <davidm@xxxxxxxxxx> DTLB/ITLB handler now
+ * uses virtual PT.
  */
 /*
  * This file defines the interruption vector table used by the CPU.
@@ -69,10 +70,14 @@
 
 #if 0
   /*
-   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
-   * needed for something else before enabling this...
+   * This lets you track the last eight faults that occurred on the CPU.
+   * Make sure ar.k2 isn't needed for something else before enabling this...
    */
-# define DBG_FAULT(i)  mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
+# define DBG_FAULT(i)          \
+       mov r16=ar.k2;;         \
+       shl r16=r16,8;;         \
+       add r16=(i),r16;;       \
+       mov ar.k2=r16
 #else
 # define DBG_FAULT(i)
 #endif
@@ -80,18 +85,18 @@
 #define MINSTATE_VIRT  /* needed by minstate.h */
 #include "minstate.h"
 
-#define FAULT(n)                                                                       \
-       mov r19=n;                      /* prepare to save predicates */                \
-       mov r31=pr;                                                                     \
+#define FAULT(n)                                                       \
+       mov r19=n;              /* prepare to save predicates */        \
+       mov r31=pr;                                                     \
        br.sptk.many dispatch_to_fault_handler
 
-#define FAULT_OR_REFLECT(n)                                                            \
-       mov r20=cr.ipsr;                                                                \
-       mov r19=n;      /* prepare to save predicates */                                \
-       mov r31=pr;;                                                                    \
-       extr.u r20=r20,IA64_PSR_CPL0_BIT,2;;                                            \
-       cmp.ne p6,p0=r0,r20;    /* cpl != 0?*/                                          \
-(p6)   br.dptk.many dispatch_reflection;                                               \
+#define FAULT_OR_REFLECT(n)                                            \
+       mov r20=cr.ipsr;                                                \
+       mov r19=n;              /* prepare to save predicates */        \
+       mov r31=pr;;                                                    \
+       extr.u r20=r20,IA64_PSR_CPL0_BIT,2;;                            \
+       cmp.ne p6,p0=r0,r20;    /* cpl != 0?*/                          \
+(p6)   br.dptk.many dispatch_reflection;                               \
        br.sptk.few dispatch_to_fault_handler
 
        .section .text.ivt,"ax"
@@ -99,7 +104,7 @@
        .align 32768    // align on 32KB boundary
        .global ia64_ivt
 ia64_ivt:
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
 ENTRY(vhpt_miss)
        DBG_FAULT(0)
@@ -107,33 +112,34 @@ ENTRY(vhpt_miss)
        FAULT(0)
 #else
        /*
-        * The VHPT vector is invoked when the TLB entry for the virtual page table
-        * is missing.  This happens only as a result of a previous
-        * (the "original") TLB miss, which may either be caused by an instruction
-        * fetch or a data access (or non-access).
+        * The VHPT vector is invoked when the TLB entry for the virtual
+        * page table is missing.  This happens only as a result of a 
+        * previous (the "original") TLB miss, which may either be caused
+        * by an instruction fetch or a data access (or non-access).
         *
-        * What we do here is normal TLB miss handing for the _original_ miss, followed
-        * by inserting the TLB entry for the virtual page table page that the VHPT
-        * walker was attempting to access.  The latter gets inserted as long
-        * as both L1 and L2 have valid mappings for the faulting address.
-        * The TLB entry for the original miss gets inserted only if
-        * the L3 entry indicates that the page is present.
+        * What we do here is normal TLB miss handing for the _original_ 
+        * miss, followed by inserting the TLB entry for the virtual page
+        * table page that the VHPT walker was attempting to access.  The
+        * latter gets inserted as long as both L1 and L2 have valid 
+        * mappings for the faulting address.  The TLB entry for the 
+        * original miss gets inserted only if the L3 entry indicates
+        * that the page is present.
         *
         * do_page_fault gets invoked in the following cases:
         *      - the faulting virtual address uses unimplemented address bits
         *      - the faulting virtual address has no L1, L2, or L3 mapping
         */
-       mov r16=cr.ifa                          // get address that caused the TLB miss
+       mov r16=cr.ifa                  // get address that caused the TLB miss
 #ifdef CONFIG_HUGETLB_PAGE
        movl r18=PAGE_SHIFT
        mov r25=cr.itir
 #endif
        ;;
-       rsm psr.dt                              // use physical addressing for data
-       mov r31=pr                              // save the predicate registers
-       mov r19=IA64_KR(PT_BASE)                // get page table base address
-       shl r21=r16,3                           // shift bit 60 into sign bit
-       shr.u r17=r16,61                        // get the region number into r17
+       rsm psr.dt                      // use physical addressing for data
+       mov r31=pr                      // save the predicate registers
+       mov r19=IA64_KR(PT_BASE)        // get page table base address
+       shl r21=r16,3                   // shift bit 60 into sign bit
+       shr.u r17=r16,61                // get the region number into r17
        ;;
        shr r22=r21,3
 #ifdef CONFIG_HUGETLB_PAGE
@@ -146,56 +152,68 @@ ENTRY(vhpt_miss)
 (p8)   shr r22=r22,r27
 #endif
        ;;
-       cmp.eq p6,p7=5,r17                      // is IFA pointing into to region 5?
-       shr.u r18=r22,PGDIR_SHIFT               // get bits 33-63 of the faulting address
-       ;;
-(p7)   dep r17=r17,r19,(PAGE_SHIFT-3),3        // put region number bits in place
+       cmp.eq p6,p7=5,r17              // is IFA pointing into to region 5?
+       shr.u r18=r22,PGDIR_SHIFT       // get bits 33-63 of faulting address
+       ;;
+(p7)   dep r17=r17,r19,(PAGE_SHIFT-3),3  // put region number bits in place
 
        srlz.d
-       LOAD_PHYSICAL(p6, r19, swapper_pg_dir)  // region 5 is rooted at swapper_pg_dir
+       LOAD_PHYSICAL(p6, r19, swapper_pg_dir)  // region 5 is rooted at 
+                                               //   swapper_pg_dir
 
        .pred.rel "mutex", p6, p7
 (p6)   shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
 (p7)   shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
        ;;
 (p6)   dep r17=r18,r19,3,(PAGE_SHIFT-3)        // r17=PTA + IFA(33,42)*8
-(p7)   dep r17=r18,r17,3,(PAGE_SHIFT-6)        // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
-       cmp.eq p7,p6=0,r21                      // unused address bits all zeroes?
+(p7)   dep r17=r18,r17,3,(PAGE_SHIFT-6)        // r17=PTA + 
+                                               //     (((IFA(61,63) << 7) |
+                                               //      IFA(33,39))*8)
+       cmp.eq p7,p6=0,r21                      // unused address bits all zero?
        shr.u r18=r22,PMD_SHIFT                 // shift L2 index into position
        ;;
        ld8 r17=[r17]                           // fetch the L1 entry (may be 0)
        ;;
 (p7)   cmp.eq p6,p7=r17,r0                     // was L1 entry NULL?
-       dep r17=r18,r17,3,(PAGE_SHIFT-3)        // compute address of L2 page 
table entry
+       dep r17=r18,r17,3,(PAGE_SHIFT-3)        // compute address of L2 page
+                                               //   table entry
        ;;
 (p7)   ld8 r20=[r17]                           // fetch the L2 entry (may be 0)
        shr.u r19=r22,PAGE_SHIFT                // shift L3 index into position
        ;;
 (p7)   cmp.eq.or.andcm p6,p7=r20,r0            // was L2 entry NULL?
-       dep r21=r19,r20,3,(PAGE_SHIFT-3)        // compute address of L3 page 
table entry
+       dep r21=r19,r20,3,(PAGE_SHIFT-3)        // compute address of L3 page
+                                               //   table entry
        ;;
 (p7)   ld8 r18=[r21]                           // read the L3 PTE
-       mov r19=cr.isr                          // cr.isr bit 0 tells us if 
this is an insn miss
+       mov r19=cr.isr                          // cr.isr bit 0 tells us if
+                                               //   this is an insn miss
        ;;
 (p7)   tbit.z p6,p7=r18,_PAGE_P_BIT            // page present bit cleared?
-       mov r22=cr.iha                          // get the VHPT address that 
caused the TLB miss
+       mov r22=cr.iha                          // get the VHPT address that
+                                               //   caused the TLB miss
        ;;                                      // avoid RAW on p7
-(p7)   tbit.nz.unc p10,p11=r19,32              // is it an instruction TLB 
miss?
-       dep r23=0,r20,0,PAGE_SHIFT              // clear low bits to get page 
address
-       ;;
-(p10)  itc.i r18                               // insert the instruction TLB 
entry
+(p7)   tbit.nz.unc p10,p11=r19,32              // is it an instruction TLB
+                                               //   miss?
+       dep r23=0,r20,0,PAGE_SHIFT              // clear low bits to get page
+                                               //   address
+       ;;
+(p10)  itc.i r18                               // insert the instruction TLB
+                                               //   entry
 (p11)  itc.d r18                               // insert the data TLB entry
-(p6)   br.cond.spnt.many page_fault            // handle bad address/page not 
present (page fault)
+(p6)   br.cond.spnt.many page_fault            // handle bad address/page not
+                                               //   present (page fault)
        mov cr.ifa=r22
 
 #ifdef CONFIG_HUGETLB_PAGE
-(p8)   mov cr.itir=r25                         // change to default page-size 
for VHPT
-#endif
-
-       /*
-        * Now compute and insert the TLB entry for the virtual page table.  We 
never
-        * execute in a page table page so there is no need to set the 
exception deferral
-        * bit.
+(p8)   mov cr.itir=r25                         // change to default page-size
+                                               //   for VHPT
+#endif
+
+       /*
+        * Now compute and insert the TLB entry for the virtual page table.
+        * We never execute in a page table page so there is no need to set
+        * the exception deferral bit.
         */
        adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
        ;;
@@ -203,15 +221,15 @@ ENTRY(vhpt_miss)
        ;;
 #ifdef CONFIG_SMP
        /*
-        * Tell the assemblers dependency-violation checker that the above 
"itc" instructions
-        * cannot possibly affect the following loads:
+        * Tell the assemblers dependency-violation checker that the above
+        * "itc" instructions cannot possibly affect the following loads:
         */
        dv_serialize_data
 
        /*
-        * Re-check L2 and L3 pagetable.  If they changed, we may have received 
a ptc.g
-        * between reading the pagetable and the "itc".  If so, flush the entry 
we
-        * inserted and retry.
+        * Re-check L2 and L3 pagetable.  If they changed, we may have 
+        * received a ptc.g between reading the pagetable and the "itc".
+        * If so, flush the entry we inserted and retry.
         */
        ld8 r25=[r21]                           // read L3 PTE again
        ld8 r26=[r17]                           // read L2 entry again
@@ -231,7 +249,7 @@ END(vhpt_miss)
 END(vhpt_miss)
 
        .org ia64_ivt+0x400
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
 ENTRY(itlb_miss)
        DBG_FAULT(1)
@@ -266,7 +284,8 @@ ENTRY(itlb_miss)
        mov r31=pr                              // save predicates
 .itlb_fault:
        mov r17=cr.iha                          // get virtual address of L3 PTE
-       movl r30=1f                             // load nested fault 
continuation point
+       movl r30=1f                             // load nested fault 
+                                               //   continuation point
        ;;
 1:     ld8 r18=[r17]                           // read L3 PTE
        ;;
@@ -278,13 +297,13 @@ 1:        ld8 r18=[r17]                           // read 
L3 PTE
        ;;
 #ifdef CONFIG_SMP
        /*
-        * Tell the assemblers dependency-violation checker that the above 
"itc" instructions
-        * cannot possibly affect the following loads:
+        * Tell the assemblers dependency-violation checker that the above
+        * "itc" instructions cannot possibly affect the following loads:
         */
        dv_serialize_data
 
-       ld8 r19=[r17]                           // read L3 PTE again and see if 
same
-       mov r20=PAGE_SHIFT<<2                   // setup page size for purge
+       ld8 r19=[r17]                   // read L3 PTE again and see if same
+       mov r20=PAGE_SHIFT<<2           // setup page size for purge
        ;;
        cmp.ne p7,p0=r18,r19
        ;;
@@ -295,26 +314,25 @@ END(itlb_miss)
 END(itlb_miss)
 
        .org ia64_ivt+0x0800
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
 ENTRY(dtlb_miss)
        DBG_FAULT(2)
 #ifdef XEN
-       mov r16=cr.ifa                          // get virtual address
+       mov r16=cr.ifa                  // get virtual address
        mov r31=pr
        ;;
        extr.u r17=r16,59,5
        ;;
-       /* If address belongs to VMM, go to alt tlb handler */
-       cmp.eq p6,p0=0x1e,r17
+       cmp.eq p6,p0=0x1e,r17           // if the address belongs to VMM, go
+                                       //   to the alternate tlb handler
 (p6)   br.cond.spnt    late_alt_dtlb_miss
        ;;
        cmp.eq p6,p0=0x1d,r17
 (p6)   br.cond.spnt    late_alt_dtlb_miss
        ;;
 #if VHPT_ENABLED
-       // XXX TODO optimization
-       mov r30=cr.ipsr
+       mov r30=cr.ipsr                 // XXX TODO optimization
        mov r28=cr.iip                  
        mov r17=cr.isr
        ;;
@@ -324,15 +342,14 @@ ENTRY(dtlb_miss)
        cmp.ne p6, p0 = r0, r18                 // cpl == 0?
 (p6)   br.cond.sptk 2f
 
-       // is speculation bit on?
-       tbit.nz p7,p0=r17,IA64_ISR_SP_BIT       
+       tbit.nz p7,p0=r17,IA64_ISR_SP_BIT       // is speculation bit on?
        ;; 
 (p7)   br.cond.spnt 2f
 
-       // Is the faulted iip in vmm area?
-       // check [59:58] bit
-       // 00, 11: guest
-       // 01, 10: vmm
+       // Is the faulted iip in the vmm area?
+       //    -- check [59:58] bit
+       //    -- if 00, 11: guest
+       //    -- if 01, 10: vmm
        extr.u r19 = r28, 58, 2
        ;; 
        cmp.eq p10, p0 = 0x0, r19
@@ -341,17 +358,16 @@ ENTRY(dtlb_miss)
 (p11)  br.cond.sptk 2f
 
        // Is the faulted address is in the identity mapping area?
-       // 0xf000... or 0xe8000...
+       // must be either 0xf000... or 0xe8000...
        extr.u r20 = r16, 59, 5
        ;; 
-       cmp.eq p12, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
+       cmp.eq p12, p0 = 0x1e, r20      // (0xf0 >> 3) = 0x1e
 (p12)  br.cond.spnt 1f
-       cmp.eq p0, p13 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
+       cmp.eq p0, p13 = 0x1d, r20      // (0xe8 >> 3) = 0x1d
 (p13)  br.cond.sptk 2f
 
 1:
-       // xen identity mappin area.
-       movl r24=PAGE_KERNEL
+       movl r24=PAGE_KERNEL            // xen identity mapping area.
        movl r25=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
        ;;
        shr.u r26=r16,55        // move address bit 59 to bit 4
@@ -361,7 +377,8 @@ 1:
        ;; 
        or r25=r25,r24          // insert PTE control bits into r25
        ;;
-       or r25=r25,r26          // set bit 4 (uncached) if the access was to 
region 6
+       or r25=r25,r26          // set bit 4 (uncached) if the access was to
+                               //   region 6
        ;;
        itc.d r25               // insert the TLB entry
        mov pr=r31,-1
@@ -388,7 +405,8 @@ 2:
 #endif
 dtlb_fault:
        mov r17=cr.iha                          // get virtual address of L3 PTE
-       movl r30=1f                             // load nested fault 
continuation point
+       movl r30=1f                             // load nested fault 
+                                               //   continuation point
        ;;
 1:     ld8 r18=[r17]                           // read L3 PTE
        ;;
@@ -400,13 +418,13 @@ 1:        ld8 r18=[r17]                           // read 
L3 PTE
        ;;
 #ifdef CONFIG_SMP
        /*
-        * Tell the assemblers dependency-violation checker that the above 
"itc" instructions
-        * cannot possibly affect the following loads:
+        * Tell the assemblers dependency-violation checker that the above
+        * "itc" instructions cannot possibly affect the following loads:
         */
        dv_serialize_data
 
-       ld8 r19=[r17]                           // read L3 PTE again and see if 
same
-       mov r20=PAGE_SHIFT<<2                   // setup page size for purge
+       ld8 r19=[r17]                   // read L3 PTE again and see if same
+       mov r20=PAGE_SHIFT<<2           // setup page size for purge
        ;;
        cmp.ne p7,p0=r18,r19
        ;;
@@ -417,7 +435,7 @@ END(dtlb_miss)
 END(dtlb_miss)
 
        .org ia64_ivt+0x0c00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
 ENTRY(alt_itlb_miss)
        DBG_FAULT(3)
@@ -439,14 +457,14 @@ late_alt_itlb_miss:
        ;;
 #endif
 #ifdef CONFIG_DISABLE_VHPT
-       shr.u r22=r16,61                        // get the region number into 
r21
-       ;;
-       cmp.gt p8,p0=6,r22                      // user mode
+       shr.u r22=r16,61                // get the region number into r21
+       ;;
+       cmp.gt p8,p0=6,r22              // user mode
        ;;
 (p8)   thash r17=r16
        ;;
 (p8)   mov cr.iha=r17
-(p8)   mov r29=b0                              // save b0
+(p8)   mov r29=b0                      // save b0
 (p8)   br.cond.dptk .itlb_fault
 #endif
        extr.u r23=r21,IA64_PSR_CPL0_BIT,2      // extract psr.cpl
@@ -463,7 +481,8 @@ late_alt_itlb_miss:
        cmp.ne p8,p0=r0,r23     // psr.cpl != 0?
        or r19=r17,r19          // insert PTE control bits into r19
        ;;
-       or r19=r19,r18          // set bit 4 (uncached) if the access was to 
region 6
+       or r19=r19,r18          // set bit 4 (uncached) if the access was to
+                               //   region 6
 (p8)   br.cond.spnt page_fault
        ;;
        itc.i r19               // insert the TLB entry
@@ -472,7 +491,7 @@ END(alt_itlb_miss)
 END(alt_itlb_miss)
 
        .org ia64_ivt+0x1000
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
 ENTRY(alt_dtlb_miss)
        DBG_FAULT(4)
@@ -503,13 +522,15 @@ late_alt_dtlb_miss:
        tbit.nz p6,p7=r20,IA64_ISR_SP_BIT       // is speculation bit on?
 #ifdef XEN
        shr.u r18=r16,55                        // move address bit 59 to bit 4
-       and r19=r19,r16                         // clear ed, reserved bits, and 
PTE control bits
+       and r19=r19,r16                         // clear ed, reserved bits, and
+                                               //   PTE control bits
        tbit.nz p9,p0=r20,IA64_ISR_NA_BIT       // is non-access bit on?
        ;;
        and r18=0x10,r18        // bit 4=address-bit(59)
 #else
        shr.u r18=r16,57                        // move address bit 61 to bit 4
-       and r19=r19,r16                         // clear ed, reserved bits, and 
PTE control bits
+       and r19=r19,r16                         // clear ed, reserved bits, and
+                                               //   PTE control bits
        tbit.nz p9,p0=r20,IA64_ISR_NA_BIT       // is non-access bit on?
        ;;
        andcm r18=0x10,r18      // bit 4=~address-bit(61)
@@ -520,36 +541,41 @@ late_alt_dtlb_miss:
 #ifdef XEN
        ;;
 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
-       // Test for the address of virtual frame_table
-       shr r22=r16,56;;
+       shr r22=r16,56          // Test for the address of virtual frame_table
+       ;;
        cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
 (p8)   br.cond.sptk frametable_miss ;;
 #endif
-       // Test for Xen address, if not handle via page_fault
-       // note that 0xf000 (cached) and 0xe800 (uncached) addresses
+       // If it is not a Xen address, handle it via page_fault.
+       // Note that 0xf000 (cached) and 0xe800 (uncached) addresses
        // should be OK.
-       extr.u r22=r16,59,5;;
+       extr.u r22=r16,59,5
+       ;;
        cmp.eq p8,p0=0x1e,r22
-(p8)   br.cond.spnt 1f;;
+(p8)   br.cond.spnt 1f
+       ;;
        cmp.ne p8,p0=0x1d,r22
-(p8)   br.cond.sptk page_fault ;;
+(p8)   br.cond.sptk page_fault
+       ;;
 1:
 #endif
 
        dep r21=-1,r21,IA64_PSR_ED_BIT,1
        or r19=r19,r17          // insert PTE control bits into r19
        ;;
-       or r19=r19,r18          // set bit 4 (uncached) if the access was to 
region 6
+       or r19=r19,r18          // set bit 4 (uncached) if the access was to
+                               //   region 6
 (p6)   mov cr.ipsr=r21
        ;;
 (p7)   itc.d r19               // insert the TLB entry
        mov pr=r31,-1
        rfi
 END(alt_dtlb_miss)
+
 #ifdef CONFIG_VIRTUAL_FRAME_TABLE      
 GLOBAL_ENTRY(frametable_miss)
        rsm psr.dt              // switch to using physical data addressing
-       movl r24=(frametable_pg_dir-PAGE_OFFSET)        // 
r24=__pa(frametable_pg_dir)
+       movl r24=(frametable_pg_dir-PAGE_OFFSET)  // r24=__pa(frametable_pg_dir)
        ;;
        srlz.d
        extr.u r17=r16,PGDIR_SHIFT,(PAGE_SHIFT-3)
@@ -583,6 +609,7 @@ GLOBAL_ENTRY(frametable_miss)
        mov pr=r31,-1           // restore predicate registers
        rfi
 END(frametable_miss)
+
 ENTRY(frametable_fault)
        ssm psr.dt              // switch to using virtual data addressing
        mov r18=cr.iip
@@ -590,7 +617,8 @@ ENTRY(frametable_fault)
        ;;
        cmp.eq p6,p7=r18,r19    // is faulting addrress ia64_frametable_probe?
        mov r8=0                // assumes that 'probe.r' uses r8
-       dep r21=-1,r21,IA64_PSR_RI_BIT+1,1 // return to next instrucition in 
bundle 2
+       dep r21=-1,r21,IA64_PSR_RI_BIT+1,1 // return to next instruction in
+                                          //   bundle 2
        ;;
 (p6)   mov cr.ipsr=r21
        mov r19=4               // FAULT(4)
@@ -599,6 +627,7 @@ ENTRY(frametable_fault)
        mov pr=r31,-1
        rfi
 END(frametable_fault)
+
 GLOBAL_ENTRY(ia64_frametable_probe)
        {
        probe.r r8=r32,0        // destination register must be r8
@@ -615,18 +644,19 @@ ENTRY(nested_dtlb_miss)
        DBG_FAULT(5)
 #ifdef XEN
        mov b0=r30
-       br.sptk.many b0                         // return to continuation point
-       ;;
-#else
-       /*
-        * In the absence of kernel bugs, we get here when the virtually mapped 
linear
-        * page table is accessed non-speculatively (e.g., in the Dirty-bit, 
Instruction
-        * Access-bit, or Data Access-bit faults).  If the DTLB entry for the 
virtual page
-        * table is missing, a nested TLB miss fault is triggered and control is
-        * transferred to this point.  When this happens, we lookup the pte for 
the
-        * faulting address by walking the page table in physical mode and 
return to the
-        * continuation point passed in register r30 (or call page_fault if the 
address is
-        * not mapped).
+       br.sptk.many b0                 // return to the continuation point
+       ;;
+#else
+       /*
+        * In the absence of kernel bugs, we get here when the virtually
+        * mapped linear page table is accessed non-speculatively (e.g.,
+        * in the Dirty-bit, Instruction Access-bit, or Data Access-bit 
+        * faults).  If the DTLB entry for the virtual page table is missing,
+        * a nested TLB miss fault is triggered and control is transferred 
+        * to this point.  When this happens, we lookup the pte for the
+        * faulting address by walking the page table in physical mode
+        * and return to the continuation point passed in register r30
+        * (or call page_fault if the address is not mapped).
         *
         * Input:       r16:    faulting address
         *              r29:    saved b0
@@ -640,47 +670,52 @@ ENTRY(nested_dtlb_miss)
         *
         * Clobbered:   b0, r18, r19, r21, psr.dt (cleared)
         */
-       rsm psr.dt                              // switch to using physical 
data addressing
-       mov r19=IA64_KR(PT_BASE)                // get the page table base 
address
-       shl r21=r16,3                           // shift bit 60 into sign bit
-       ;;
-       shr.u r17=r16,61                        // get the region number into 
r17
-       ;;
-       cmp.eq p6,p7=5,r17                      // is faulting address in 
region 5?
-       shr.u r18=r16,PGDIR_SHIFT               // get bits 33-63 of faulting 
address
-       ;;
-(p7)   dep r17=r17,r19,(PAGE_SHIFT-3),3        // put region number bits in 
place
+       rsm psr.dt                      // switch to using physical data 
+                                       //   addressing
+       mov r19=IA64_KR(PT_BASE)        // get the page table base address
+       shl r21=r16,3                   // shift bit 60 into sign bit
+       ;;
+       shr.u r17=r16,61                // get the region number into r17
+       ;;
+       cmp.eq p6,p7=5,r17              // is faulting address in region 5?
+       shr.u r18=r16,PGDIR_SHIFT       // get bits 33-63 of faulting address
+       ;;
+(p7)   dep r17=r17,r19,(PAGE_SHIFT-3),3  // put region number bits in place
 
        srlz.d
-       LOAD_PHYSICAL(p6, r19, swapper_pg_dir)  // region 5 is rooted at 
swapper_pg_dir
+       LOAD_PHYSICAL(p6, r19, swapper_pg_dir)  // region 5 is rooted at 
+                                               //   swapper_pg_dir
 
        .pred.rel "mutex", p6, p7
 (p6)   shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
 (p7)   shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
        ;;
-(p6)   dep r17=r18,r19,3,(PAGE_SHIFT-3)        // r17=PTA + IFA(33,42)*8
-(p7)   dep r17=r18,r17,3,(PAGE_SHIFT-6)        // r17=PTA + (((IFA(61,63) << 
7) | IFA(33,39))*8)
-       cmp.eq p7,p6=0,r21                      // unused address bits all 
zeroes?
-       shr.u r18=r16,PMD_SHIFT                 // shift L2 index into position
-       ;;
-       ld8 r17=[r17]                           // fetch the L1 entry (may be 0)
-       ;;
-(p7)   cmp.eq p6,p7=r17,r0                     // was L1 entry NULL?
-       dep r17=r18,r17,3,(PAGE_SHIFT-3)        // compute address of L2 page 
table entry
-       ;;
-(p7)   ld8 r17=[r17]                           // fetch the L2 entry (may be 0)
-       shr.u r19=r16,PAGE_SHIFT                // shift L3 index into position
-       ;;
-(p7)   cmp.eq.or.andcm p6,p7=r17,r0            // was L2 entry NULL?
-       dep r17=r19,r17,3,(PAGE_SHIFT-3)        // compute address of L3 page 
table entry
+(p6)   dep r17=r18,r19,3,(PAGE_SHIFT-3)  // r17=PTA + IFA(33,42)*8
+(p7)   dep r17=r18,r17,3,(PAGE_SHIFT-6)  // r17=PTA + (((IFA(61,63) << 7) |
+                                         //            IFA(33,39))*8)
+       cmp.eq p7,p6=0,r21              // unused address bits all zeroes?
+       shr.u r18=r16,PMD_SHIFT         // shift L2 index into position
+       ;;
+       ld8 r17=[r17]                   // fetch the L1 entry (may be 0)
+       ;;
+(p7)   cmp.eq p6,p7=r17,r0             // was L1 entry NULL?
+       dep r17=r18,r17,3,(PAGE_SHIFT-3)  // compute address of L2 page table
+                                         //   entry
+       ;;
+(p7)   ld8 r17=[r17]                   // fetch the L2 entry (may be 0)
+       shr.u r19=r16,PAGE_SHIFT        // shift L3 index into position
+       ;;
+(p7)   cmp.eq.or.andcm p6,p7=r17,r0    // was L2 entry NULL?
+       dep r17=r19,r17,3,(PAGE_SHIFT-3)  // compute address of L3 page table
+                                         //   entry
 (p6)   br.cond.spnt page_fault
        mov b0=r30
-       br.sptk.many b0                         // return to continuation point
+       br.sptk.many b0                 // return to continuation point
 #endif
 END(nested_dtlb_miss)
 
        .org ia64_ivt+0x1800
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
 ENTRY(ikey_miss)
        DBG_FAULT(6)
@@ -691,8 +726,9 @@ ENTRY(ikey_miss)
 #endif
 END(ikey_miss)
 
-       
//-----------------------------------------------------------------------------------
-       // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is 
faulting address)
+       //----------------------------------------------------------------
+       // call do_page_fault (predicates are in r31, psr.dt may be off, 
+       // r16 is faulting address)
 #ifdef XEN
 GLOBAL_ENTRY(page_fault)
 #else
@@ -713,24 +749,25 @@ ENTRY(page_fault)
        mov out0=cr.ifa
        mov out1=cr.isr
 #endif
-       adds r3=8,r2                            // set up second base pointer
+       adds r3=8,r2                    // set up second base pointer
        ;;
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
-       srlz.i                                  // guarantee that interruption 
collectin is on
-       ;;
-(p15)  ssm psr.i                               // restore psr.i
+       srlz.i                          // guarantee that interruption 
+                                       //   collection is on
+       ;;
+(p15)  ssm psr.i                       // restore psr.i
        movl r14=ia64_leave_kernel
        ;;
        SAVE_REST
        mov rp=r14
        ;;
-       adds out2=16,r12                        // out2 = pointer to pt_regs
+       adds out2=16,r12                // out2 = pointer to pt_regs
        br.call.sptk.many b6=ia64_do_page_fault // ignore return address
 END(page_fault)
 
        .org ia64_ivt+0x1c00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 ENTRY(dkey_miss)
        DBG_FAULT(7)
@@ -742,32 +779,33 @@ END(dkey_miss)
 END(dkey_miss)
 
        .org ia64_ivt+0x2000
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
 ENTRY(dirty_bit)
        DBG_FAULT(8)
 #ifdef XEN
        mov r20=cr.ipsr
-       mov r31=pr;;
-       extr.u r20=r20,IA64_PSR_CPL0_BIT,2;;
-       mov r19=8       /* prepare to save predicates */
-       cmp.eq p6,p0=r0,r20     /* cpl == 0?*/
+       mov r31=pr
+       ;;
+       extr.u r20=r20,IA64_PSR_CPL0_BIT,2
+       ;;
+       mov r19=8                       // prepare to save predicates
+       cmp.eq p6,p0=r0,r20             // cpl == 0?
 (p6)   br.sptk.few dispatch_to_fault_handler
-       /* If shadow mode is not enabled, reflect the fault.  */
+       // If shadow mode is not enabled, reflect the fault.
        movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
        ;;
        ld8 r22=[r22]
        ;;
        add r22=IA64_VCPU_DOMAIN_OFFSET,r22
        ;;
-       /* Read domain.  */
+       ld8 r22=[r22]                   // read domain
+       ;;
+       add r22=IA64_DOMAIN_SHADOW_BITMAP_OFFSET,r22
+       ;;
        ld8 r22=[r22]
        ;;
-       add r22=IA64_DOMAIN_SHADOW_BITMAP_OFFSET,r22
-       ;;
-       ld8 r22=[r22]
-       ;;
-       cmp.eq p6,p0=r0,r22     /* !shadow_bitmap ?*/
+       cmp.eq p6,p0=r0,r22             // !shadow_bitmap ?
 (p6)   br.dptk.many dispatch_reflection
 
        SAVE_MIN_WITH_COVER
@@ -779,10 +817,11 @@ ENTRY(dirty_bit)
 
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
-       srlz.i                                  // guarantee that interruption collection is on
-       ;;
-(p15)  ssm psr.i                               // restore psr.i
-       adds r3=8,r2                            // set up second base pointer
+       srlz.i                          // guarantee that interruption 
+                                       //   collection is on
+       ;;
+(p15)  ssm psr.i                       // restore psr.i
+       adds r3=8,r2                    // set up second base pointer
        ;;
        SAVE_REST
        movl r14=ia64_leave_kernel
@@ -791,65 +830,69 @@ ENTRY(dirty_bit)
        br.call.sptk.many b6=ia64_shadow_fault
 #else
        /*
-        * What we do here is to simply turn on the dirty bit in the PTE.  We need to
-        * update both the page-table and the TLB entry.  To efficiently access the PTE,
-        * we address it through the virtual page table.  Most likely, the TLB entry for
-        * the relevant virtual page table page is still present in the TLB so we can
-        * normally do this without additional TLB misses.  In case the necessary virtual
-        * page table TLB entry isn't present, we take a nested TLB miss hit where we look
-        * up the physical address of the L3 PTE and then continue at label 1 below.
-        */
-       mov r16=cr.ifa                          // get the address that caused the fault
-       movl r30=1f                             // load continuation point in case of nested fault
-       ;;
-       thash r17=r16                           // compute virtual address of L3 PTE
-       mov r29=b0                              // save b0 in case of nested fault
-       mov r31=pr                              // save pr
+        * What we do here is to simply turn on the dirty bit in the PTE.
+        * We need to update both the page-table and the TLB entry.  To 
+        * efficiently access the PTE, we address it through the virtual
+        * page table.  Most likely, the TLB entry for the relevant virtual
+        * page table page is still present in the TLB so we can normally 
+        * do this without additional TLB misses.  In case the necessary 
+        * virtual page table TLB entry isn't present, we take a nested 
+        * TLB miss hit where we look up the physical address of the L3
+        * PTE and then continue at label 1 below.
+        */
+       mov r16=cr.ifa                  // get the address that caused the 
+                                       //   fault
+       movl r30=1f                     // load continuation point in case 
+                                       //   of nested fault
+       ;;
+       thash r17=r16                   // compute virtual address of L3 PTE
+       mov r29=b0                      // save b0 in case of nested fault
+       mov r31=pr                      // save pr
 #ifdef CONFIG_SMP
-       mov r28=ar.ccv                          // save ar.ccv
+       mov r28=ar.ccv                  // save ar.ccv
        ;;
 1:     ld8 r18=[r17]
-       ;;                                      // avoid RAW on r18
-       mov ar.ccv=r18                          // set compare value for cmpxchg
-       or r25=_PAGE_D|_PAGE_A,r18              // set the dirty and accessed bits
+       ;;                              // avoid RAW on r18
+       mov ar.ccv=r18                  // set compare value for cmpxchg
+       or r25=_PAGE_D|_PAGE_A,r18      // set the dirty and accessed bits
        ;;
        cmpxchg8.acq r26=[r17],r25,ar.ccv
        mov r24=PAGE_SHIFT<<2
        ;;
        cmp.eq p6,p7=r26,r18
        ;;
-(p6)   itc.d r25                               // install updated PTE
-       ;;
-       /*
-        * Tell the assemblers dependency-violation checker that the above "itc" instructions
-        * cannot possibly affect the following loads:
+(p6)   itc.d r25                       // install updated PTE
+       ;;
+       /*
+        * Tell the assemblers dependency-violation checker that the above
+        * "itc" instructions cannot possibly affect the following loads:
         */
        dv_serialize_data
 
-       ld8 r18=[r17]                           // read PTE again
-       ;;
-       cmp.eq p6,p7=r18,r25                    // is it same as the newly installed
+       ld8 r18=[r17]                   // read PTE again
+       ;;
+       cmp.eq p6,p7=r18,r25            // is it same as the newly installed
        ;;
 (p7)   ptc.l r16,r24
-       mov b0=r29                              // restore b0
+       mov b0=r29                      // restore b0
        mov ar.ccv=r28
 #else
        ;;
 1:     ld8 r18=[r17]
-       ;;                                      // avoid RAW on r18
-       or r18=_PAGE_D|_PAGE_A,r18              // set the dirty and accessed bits
-       mov b0=r29                              // restore b0
-       ;;
-       st8 [r17]=r18                           // store back updated PTE
-       itc.d r18                               // install updated PTE
-#endif
-       mov pr=r31,-1                           // restore pr
+       ;;                              // avoid RAW on r18
+       or r18=_PAGE_D|_PAGE_A,r18      // set the dirty and accessed bits
+       mov b0=r29                      // restore b0
+       ;;
+       st8 [r17]=r18                   // store back updated PTE
+       itc.d r18                       // install updated PTE
+#endif
+       mov pr=r31,-1                   // restore pr
        rfi
 #endif
 END(dirty_bit)
 
        .org ia64_ivt+0x2400
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
 ENTRY(iaccess_bit)
        DBG_FAULT(9)
@@ -862,9 +905,11 @@ ENTRY(iaccess_bit)
        br.sptk.many fast_access_reflect;;
 #else
        // Like Entry 8, except for instruction access
-       mov r16=cr.ifa                          // get the address that caused the fault
-       movl r30=1f                             // load continuation point in case of nested fault
-       mov r31=pr                              // save predicates
+       mov r16=cr.ifa                  // get the address that caused the
+                                       //   fault
+       movl r30=1f                     // load continuation point in case 
+                                       //   of nested fault
+       mov r31=pr                      // save predicates
 #ifdef CONFIG_ITANIUM
        /*
         * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
@@ -872,50 +917,50 @@ ENTRY(iaccess_bit)
        mov r17=cr.ipsr
        ;;
        mov r18=cr.iip
-       tbit.z p6,p0=r17,IA64_PSR_IS_BIT        // IA64 instruction set?
-       ;;
-(p6)   mov r16=r18                             // if so, use cr.iip instead of cr.ifa
+       tbit.z p6,p0=r17,IA64_PSR_IS_BIT  // IA64 instruction set?
+       ;;
+(p6)   mov r16=r18                     // if so, use cr.iip instead of cr.ifa
 #endif /* CONFIG_ITANIUM */
        ;;
-       thash r17=r16                           // compute virtual address of L3 PTE
-       mov r29=b0                              // save b0 in case of nested fault)
+       thash r17=r16                   // compute virtual address of L3 PTE
+       mov r29=b0                      // save b0 in case of nested fault)
 #ifdef CONFIG_SMP
-       mov r28=ar.ccv                          // save ar.ccv
+       mov r28=ar.ccv                  // save ar.ccv
        ;;
 1:     ld8 r18=[r17]
        ;;
-       mov ar.ccv=r18                          // set compare value for cmpxchg
-       or r25=_PAGE_A,r18                      // set the accessed bit
+       mov ar.ccv=r18                  // set compare value for cmpxchg
+       or r25=_PAGE_A,r18              // set the accessed bit
        ;;
        cmpxchg8.acq r26=[r17],r25,ar.ccv
        mov r24=PAGE_SHIFT<<2
        ;;
        cmp.eq p6,p7=r26,r18
        ;;
-(p6)   itc.i r25                               // install updated PTE
-       ;;
-       /*
-        * Tell the assemblers dependency-violation checker that the above "itc" instructions
-        * cannot possibly affect the following loads:
+(p6)   itc.i r25                       // install updated PTE
+       ;;
+       /*
+        * Tell the assemblers dependency-violation checker that the above
+        * "itc" instructions cannot possibly affect the following loads:
         */
        dv_serialize_data
 
-       ld8 r18=[r17]                           // read PTE again
-       ;;
-       cmp.eq p6,p7=r18,r25                    // is it same as the newly 
installed
+       ld8 r18=[r17]                   // read PTE again
+       ;;
+       cmp.eq p6,p7=r18,r25            // is it same as the newly installed
        ;;
 (p7)   ptc.l r16,r24
-       mov b0=r29                              // restore b0
+       mov b0=r29                      // restore b0
        mov ar.ccv=r28
 #else /* !CONFIG_SMP */
        ;;
 1:     ld8 r18=[r17]
        ;;
-       or r18=_PAGE_A,r18                      // set the accessed bit
-       mov b0=r29                              // restore b0
-       ;;
-       st8 [r17]=r18                           // store back updated PTE
-       itc.i r18                               // install updated PTE
+       or r18=_PAGE_A,r18              // set the accessed bit
+       mov b0=r29                      // restore b0
+       ;;
+       st8 [r17]=r18                   // store back updated PTE
+       itc.i r18                       // install updated PTE
 #endif /* !CONFIG_SMP */
        mov pr=r31,-1
        rfi
@@ -923,7 +968,7 @@ END(iaccess_bit)
 END(iaccess_bit)
 
        .org ia64_ivt+0x2800
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
 ENTRY(daccess_bit)
        DBG_FAULT(10)
@@ -933,74 +978,80 @@ ENTRY(daccess_bit)
        mov r31=pr
        mov r19=10
        mov r20=0x2800
-       br.sptk.many fast_access_reflect;;
+       br.sptk.many fast_access_reflect
+       ;;
 #else
        // Like Entry 8, except for data access
-       mov r16=cr.ifa                          // get the address that caused the fault
-       movl r30=1f                             // load continuation point in case of nested fault
-       ;;
-       thash r17=r16                           // compute virtual address of L3 PTE
+       mov r16=cr.ifa                  // get the address that caused the
+                                       //   fault
+       movl r30=1f                     // load continuation point in case
+                                       //   of nested fault
+       ;;
+       thash r17=r16                   // compute virtual address of L3 PTE
        mov r31=pr
-       mov r29=b0                              // save b0 in case of nested fault)
+       mov r29=b0                      // save b0 in case of nested fault)
 #ifdef CONFIG_SMP
-       mov r28=ar.ccv                          // save ar.ccv
+       mov r28=ar.ccv                  // save ar.ccv
        ;;
 1:     ld8 r18=[r17]
-       ;;                                      // avoid RAW on r18
-       mov ar.ccv=r18                          // set compare value for cmpxchg
-       or r25=_PAGE_A,r18                      // set the dirty bit
+       ;;                              // avoid RAW on r18
+       mov ar.ccv=r18                  // set compare value for cmpxchg
+       or r25=_PAGE_A,r18              // set the dirty bit
        ;;
        cmpxchg8.acq r26=[r17],r25,ar.ccv
        mov r24=PAGE_SHIFT<<2
        ;;
        cmp.eq p6,p7=r26,r18
        ;;
-(p6)   itc.d r25                               // install updated PTE
-       /*
-        * Tell the assemblers dependency-violation checker that the above "itc" instructions
-        * cannot possibly affect the following loads:
+(p6)   itc.d r25                       // install updated PTE
+       /*
+        * Tell the assemblers dependency-violation checker that the above
+        * "itc" instructions cannot possibly affect the following loads:
         */
        dv_serialize_data
        ;;
-       ld8 r18=[r17]                           // read PTE again
-       ;;
-       cmp.eq p6,p7=r18,r25                    // is it same as the newly installed
+       ld8 r18=[r17]                   // read PTE again
+       ;;
+       cmp.eq p6,p7=r18,r25            // is it same as the newly installed
        ;;
 (p7)   ptc.l r16,r24
        mov ar.ccv=r28
 #else
        ;;
 1:     ld8 r18=[r17]
-       ;;                                      // avoid RAW on r18
-       or r18=_PAGE_A,r18                      // set the accessed bit
-       ;;
-       st8 [r17]=r18                           // store back updated PTE
-       itc.d r18                               // install updated PTE
-#endif
-       mov b0=r29                              // restore b0
+       ;;                              // avoid RAW on r18
+       or r18=_PAGE_A,r18              // set the accessed bit
+       ;;
+       st8 [r17]=r18                   // store back updated PTE
+       itc.d r18                       // install updated PTE
+#endif
+       mov b0=r29                      // restore b0
        mov pr=r31,-1
        rfi
 #endif
 END(daccess_bit)
 
        .org ia64_ivt+0x2c00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
 ENTRY(break_fault)
        /*
-        * The streamlined system call entry/exit paths only save/restore the initial part
-        * of pt_regs.  This implies that the callers of system-calls must adhere to the
-        * normal procedure calling conventions.
+        * The streamlined system call entry/exit paths only save/restore 
+        * the initial part of pt_regs.  This implies that the callers of
+        * system-calls must adhere to the normal procedure calling 
+        * conventions.
         *
         *   Registers to be saved & restored:
         *      CR registers: cr.ipsr, cr.iip, cr.ifs
-        *      AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
+        *      AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore,
+        *                    ar.fpsr
         *      others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
         *   Registers to be restored only:
         *      r8-r11: output value from the system call.
         *
-        * During system call exit, scratch registers (including r15) are modified/cleared
-        * to prevent leaking bits from kernel to user level.
+        * During system call exit, scratch registers (including r15) are
+        * modified/cleared to prevent leaking bits from kernel to user 
+        * level.
         */
        DBG_FAULT(11)
 #ifdef XEN
@@ -1009,13 +1060,17 @@ ENTRY(break_fault)
        mov r31=pr
        ;;
        cmp.eq p7,p0=r17,r0
-(p7)   br.spnt.few dispatch_break_fault ;;
+(p7)   br.spnt.few dispatch_break_fault
+       ;;
 #ifdef CRASH_DEBUG
-        // panic can occur before domain0 is created.
-        // in such case referencing XSI_PSR_IC causes nested_dtlb_miss
-        movl r18=CDB_BREAK_NUM ;;
-        cmp.eq p7,p0=r17,r18 ;; 
-(p7)    br.spnt.few dispatch_break_fault ;;
+        // A panic can occur before domain0 is created.  In such cases, 
+       // referencing XSI_PSR_IC causes nested_dtlb_miss.
+        movl r18=CDB_BREAK_NUM
+       ;;
+        cmp.eq p7,p0=r17,r18
+       ;; 
+(p7)    br.spnt.few dispatch_break_fault
+       ;;
 #endif
        movl r18=THIS_CPU(current_psr_ic_addr)
        ;;
@@ -1026,17 +1081,19 @@ ENTRY(break_fault)
        cmp.eq p7,p0=r0,r17                     // is this a psuedo-cover?
 (p7)   br.spnt.many dispatch_privop_fault
        ;;
-       // if vpsr.ic is off, we have a hyperprivop
-       // A hyperprivop is hand-coded assembly with psr.ic off
-       // which means no calls, no use of r1-r15 and no memory accesses
-       // except to pinned addresses!
+       // If vpsr.ic is off, we have a hyperprivop.  A hyperprivop is
+       // hand-coded assembly with psr.ic off which means it can make
+       // no calls, cannot use r1-r15, and it can have no memory accesses
+       // unless they are to pinned addresses!
        cmp4.eq p7,p0=r0,r19
 (p7)   br.sptk.many fast_hyperprivop
        ;;
-       movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
+       movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
+       ;;
        ld8 r22 = [r22]
        ;;
-       adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22;;
+       adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22
+       ;;
        ld4 r23=[r22];;
        cmp4.eq p6,p7=r23,r17                   // Xen-reserved breakimm?
 (p6)   br.spnt.many dispatch_break_fault
@@ -1056,78 +1113,86 @@ ENTRY(break_fault)
        mov r26=ar.pfs
        mov r28=cr.iip
 #ifndef XEN
-       mov r31=pr                              // prepare to save predicates
+       mov r31=pr                      // prepare to save predicates
 #endif
        mov r20=r1
        ;;
        adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
-       cmp.eq p0,p7=r18,r17                    // is this a system call? (p7 <- false, if so)
+       cmp.eq p0,p7=r18,r17            // is this a system call? 
+                                       //   (p7 <- false, if so)
 (p7)   br.cond.spnt non_syscall
        ;;
-       ld1 r17=[r16]                           // load current->thread.on_ustack flag
-       st1 [r16]=r0                            // clear current->thread.on_ustack flag
-       add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16   // set r1 for MINSTATE_START_SAVE_MIN_VIRT
+       ld1 r17=[r16]                   // load current->thread.on_ustack flag
+       st1 [r16]=r0                    // clear current->thread.on_ustack flag
+       add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
+                                       // set r1 for 
+                                       //   MINSTATE_START_SAVE_MIN_VIRT
        ;;
        invala
 
        /* adjust return address so we skip over the break instruction: */
 
-       extr.u r8=r29,41,2                      // extract ei field from cr.ipsr
-       ;;
-       cmp.eq p6,p7=2,r8                       // isr.ei==2?
-       mov r2=r1                               // setup r2 for ia64_syscall_setup
-       ;;
-(p6)   mov r8=0                                // clear ei to 0
-(p6)   adds r28=16,r28                         // switch cr.iip to next bundle cr.ipsr.ei wrapped
-(p7)   adds r8=1,r8                            // increment ei to next slot
-       ;;
-       cmp.eq pKStk,pUStk=r0,r17               // are we in kernel mode already?
-       dep r29=r8,r29,41,2                     // insert new ei into cr.ipsr
+       extr.u r8=r29,41,2              // extract ei field from cr.ipsr
+       ;;
+       cmp.eq p6,p7=2,r8               // isr.ei==2?
+       mov r2=r1                       // setup r2 for ia64_syscall_setup
+       ;;
+(p6)   mov r8=0                        // clear ei to 0
+(p6)   adds r28=16,r28                 // switch cr.iip to next bundle 
+                                       //   cr.ipsr.ei wrapped
+(p7)   adds r8=1,r8                    // increment ei to next slot
+       ;;
+       cmp.eq pKStk,pUStk=r0,r17       // are we in kernel mode already?
+       dep r29=r8,r29,41,2             // insert new ei into cr.ipsr
        ;;
 
        // switch from user to kernel RBS:
        MINSTATE_START_SAVE_MIN_VIRT
        br.call.sptk.many b7=ia64_syscall_setup
        ;;
-       MINSTATE_END_SAVE_MIN_VIRT              // switch to bank 1
+       MINSTATE_END_SAVE_MIN_VIRT      // switch to bank 1
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
-       srlz.i                                  // guarantee that interruption collection is on
+       srlz.i                          // guarantee that interruption 
+                                       //   collection is on
        mov r3=NR_syscalls - 1
        ;;
-(p15)  ssm psr.i                               // restore psr.i
+(p15)  ssm psr.i                       // restore psr.i
        // p10==true means out registers are more than 8 or r15's Nat is true
 (p10)  br.cond.spnt.many ia64_ret_from_syscall
        ;;
        movl r16=sys_call_table
 
-       adds r15=-1024,r15                      // r15 contains the syscall number---subtract 1024
+       adds r15=-1024,r15              // r15 contains the syscall number --
+                                       //   subtract 1024 from it
        movl r2=ia64_ret_from_syscall
        ;;
-       shladd r20=r15,3,r16                    // r20 = sys_call_table + 8*(syscall-1024)
-       cmp.leu p6,p7=r15,r3                    // (syscall > 0 && syscall < 1024 + NR_syscalls) ?
-       mov rp=r2                               // set the real return addr
-       ;;
-(p6)   ld8 r20=[r20]                           // load address of syscall entry point
+       shladd r20=r15,3,r16            // r20 = sys_call_table + 
+                                       //       8*(syscall-1024)
+       cmp.leu p6,p7=r15,r3            // (syscall > 0 && syscall < 1024 +
+                                       //  NR_syscalls) ?
+       mov rp=r2                       // set the real return addr
+       ;;
+(p6)   ld8 r20=[r20]                   // load address of syscall entry point
 (p7)   movl r20=sys_ni_syscall
 
        add r2=TI_FLAGS+IA64_TASK_SIZE,r13
        ;;
-       ld4 r2=[r2]                             // r2 = current_thread_info()->flags
-       ;;
-       and r2=_TIF_SYSCALL_TRACEAUDIT,r2       // mask trace or audit
+       ld4 r2=[r2]                     // r2 = current_thread_info()->flags
+       ;;
+       and r2=_TIF_SYSCALL_TRACEAUDIT,r2  // mask trace or audit
        ;;
        cmp.eq p8,p0=r2,r0
        mov b6=r20
        ;;
-(p8)   br.call.sptk.many b6=b6                 // ignore this return addr
+(p8)   br.call.sptk.many b6=b6         // ignore this return addr
        br.cond.sptk ia64_trace_syscall
        // NOT REACHED
 #endif
 END(break_fault)
 
        .org ia64_ivt+0x3000
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
 ENTRY(interrupt)
        DBG_FAULT(12)
@@ -1138,11 +1203,16 @@ ENTRY(interrupt)
        // FIXME: this is a hack... use cpuinfo.ksoftirqd because its
        // not used anywhere else and we need a place to stash ivr and
        // there's no registers available unused by SAVE_MIN/REST
-       movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
-       st8 [r29]=r30;;
-       movl r28=slow_interrupt;;
-       mov r29=rp;;
-       mov rp=r28;;
+       movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET
+       ;;
+       st8 [r29]=r30
+       ;;
+       movl r28=slow_interrupt
+       ;;
+       mov r29=rp
+       ;;
+       mov rp=r28
+       ;;
        br.cond.sptk.many fast_tick_reflect
        ;;
 slow_interrupt:
@@ -1175,16 +1245,16 @@ END(interrupt)
 END(interrupt)
 
        .org ia64_ivt+0x3400
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x3400 Entry 13 (size 64 bundles) Reserved
        DBG_FAULT(13)
        FAULT(13)
 
 #ifdef XEN
-       // There is no particular reason for this code to be here, other than that
-       // there happens to be space here that would go unused otherwise.  If this
-       // fault ever gets "unreserved", simply moved the following code to a more
-       // suitable spot...
+       // There is no particular reason for this code to be here, other
+       // than that there happens to be space here that would go unused 
+       // otherwise.  If this fault ever gets "unreserved", simply move
+       // the following code to a more suitable spot...
 
 GLOBAL_ENTRY(dispatch_break_fault)
        SAVE_MIN_WITH_COVER
@@ -1198,32 +1268,32 @@ dispatch_break_fault_post_save:
 
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
-       srlz.i                                  // guarantee that interruption collection is on
-       ;;
-(p15)  ssm psr.i                               // restore psr.i
-       adds r3=8,r2                            // set up second base pointer
+       srlz.i                  // guarantee that interruption collection is on
+       ;;
+(p15)  ssm psr.i               // restore psr.i
+       adds r3=8,r2            // set up second base pointer
        ;;
        SAVE_REST
        movl r14=ia64_leave_kernel
        ;;
        mov rp=r14
-//     br.sptk.many ia64_prepare_handle_break
-    br.call.sptk.many b6=ia64_handle_break
+//     br.sptk.many ia64_prepare_handle_break  // TODO: why commented out?
+       br.call.sptk.many b6=ia64_handle_break
 END(dispatch_break_fault)
 #endif
 
        .org ia64_ivt+0x3800
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x3800 Entry 14 (size 64 bundles) Reserved
        DBG_FAULT(14)
        FAULT(14)
 
 #ifndef XEN
        /*
-        * There is no particular reason for this code to be here, other than that
-        * there happens to be space here that would go unused otherwise.  If this
-        * fault ever gets "unreserved", simply moved the following code to a more
-        * suitable spot...
+        * There is no particular reason for this code to be here, other 
+        * than that there happens to be space here that would go unused 
+        * otherwise.  If this fault ever gets "unreserved", simply move
+        * the following code to a more suitable spot...
         *
         * ia64_syscall_setup() is a separate subroutine so that it can
         *      allocate stacked registers so it can safely demine any
@@ -1271,11 +1341,11 @@ GLOBAL_ENTRY(ia64_syscall_setup)
 # error This code assumes that b6 is the first field in pt_regs.
 #endif
 #endif
-       st8 [r1]=r19                            // save b6
-       add r16=PT(CR_IPSR),r1                  // initialize first base pointer
-       add r17=PT(R11),r1                      // initialize second base pointer
-       ;;
-       alloc r19=ar.pfs,8,0,0,0                // ensure in0-in7 are writable
+       st8 [r1]=r19                    // save b6
+       add r16=PT(CR_IPSR),r1          // initialize first base pointer
+       add r17=PT(R11),r1              // initialize second base pointer
+       ;;
+       alloc r19=ar.pfs,8,0,0,0        // ensure in0-in7 are writable
        st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)    // save cr.ipsr
        tnat.nz p8,p0=in0
 
@@ -1312,18 +1382,20 @@ GLOBAL_ENTRY(ia64_syscall_setup)
        tnat.nz p11,p0=in3
        ;;
 (p10)  mov in2=-1
-       tnat.nz p12,p0=in4                              // [I0]
+       tnat.nz p12,p0=in4                      // [I0]
 (p11)  mov in3=-1
        ;;
 (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT)       // save ar.rnat
 (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)   // save ar.bspstore
-       shl r18=r18,16                          // compute ar.rsc to be used for "loadrs"
+       shl r18=r18,16                          // compute ar.rsc to be used
+                                               //   for "loadrs"
        ;;
        st8 [r16]=r31,PT(LOADRS)-PT(PR)         // save predicates
        st8 [r17]=r28,PT(R1)-PT(B0)             // save b0
-       tnat.nz p13,p0=in5                              // [I0]
-       ;;
-       st8 [r16]=r18,PT(R12)-PT(LOADRS)        // save ar.rsc value for "loadrs"
+       tnat.nz p13,p0=in5                      // [I0]
+       ;;
+       st8 [r16]=r18,PT(R12)-PT(LOADRS)        // save ar.rsc value for
+                                               //   "loadrs"
        st8.spill [r17]=r20,PT(R13)-PT(R1)      // save original r1
 (p12)  mov in4=-1
        ;;
@@ -1336,32 +1408,34 @@ GLOBAL_ENTRY(ia64_syscall_setup)
        tnat.nz p14,p0=in6
        cmp.lt p10,p9=r11,r8    // frame size can't be more than local+8
        ;;
-       stf8 [r16]=f1           // ensure pt_regs.r8 != 0 (see handle_syscall_error)
+       stf8 [r16]=f1           // ensure pt_regs.r8 != 0 
+                               //   (see handle_syscall_error)
 (p9)   tnat.nz p10,p0=r15
-       adds r12=-16,r1         // switch to kernel memory stack (with 16 bytes of scratch)
-
-       st8.spill [r17]=r15                     // save r15
+       adds r12=-16,r1         // switch to kernel memory stack (with 16 
+                               //   bytes of scratch)
+
+       st8.spill [r17]=r15     // save r15
        tnat.nz p8,p0=in7
        nop.i 0
 
-       mov r13=r2                              // establish `current'
-       movl r1=__gp                            // establish kernel global 
pointer
+       mov r13=r2              // establish `current'
+       movl r1=__gp            // establish kernel global pointer
        ;;
 (p14)  mov in6=-1
 (p8)   mov in7=-1
        nop.i 0
 
-       cmp.eq pSys,pNonSys=r0,r0               // set pSys=1, pNonSys=0
+       cmp.eq pSys,pNonSys=r0,r0       // set pSys=1, pNonSys=0
        movl r17=FPSR_DEFAULT
        ;;
-       mov.m ar.fpsr=r17                       // set ar.fpsr to kernel default value
+       mov.m ar.fpsr=r17               // set ar.fpsr to kernel default value
 (p10)  mov r8=-EINVAL
        br.ret.sptk.many b7
 END(ia64_syscall_setup)
 #endif /* XEN */
        
        .org ia64_ivt+0x3c00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x3c00 Entry 15 (size 64 bundles) Reserved
        DBG_FAULT(15)
        FAULT(15)
@@ -1370,11 +1444,12 @@ END(ia64_syscall_setup)
        /*
         * Squatting in this space ...
         *
-        * This special case dispatcher for illegal operation faults allows 
preserved
-        * registers to be modified through a callback function (asm only) that 
is handed
-        * back from the fault handler in r8. Up to three arguments can be 
passed to the
-        * callback function by returning an aggregate with the callback as its 
first
-        * element, followed by the arguments.
+        * This special case dispatcher for illegal operation faults 
+        * allows preserved registers to be modified through a callback
+        * function (asm only) that is handed back from the fault handler
+        * in r8.  Up to three arguments can be passed to the callback
+        * function by returning an aggregate with the callback as its 
+        * first element, followed by the arguments.
         */
 ENTRY(dispatch_illegal_op_fault)
        SAVE_MIN_WITH_COVER
@@ -1408,21 +1483,22 @@ END(dispatch_illegal_op_fault)
 #endif
 
        .org ia64_ivt+0x4000
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x4000 Entry 16 (size 64 bundles) Reserved
        DBG_FAULT(16)
        FAULT(16)
 
 #ifdef XEN
-       // There is no particular reason for this code to be here, other than 
that
-       // there happens to be space here that would go unused otherwise.  If 
this
-       // fault ever gets "unreserved", simply moved the following code to a 
more
-       // suitable spot...
+       // There is no particular reason for this code to be here, other
+       // than that there happens to be space here that would go unused 
+       // otherwise.  If this fault ever gets "unreserved", simply move
+       // the following code to a more suitable spot...
 
 ENTRY(dispatch_privop_fault)
        SAVE_MIN_WITH_COVER
        ;;
-       alloc r14=ar.pfs,0,0,4,0                // now it's safe (must be first 
in insn group!)
+       alloc r14=ar.pfs,0,0,4,0        // now it's safe (must be first in
+                                       //   insn group!)
        mov out0=cr.ifa
        adds out1=16,sp
        mov out2=cr.isr         // FIXME: pity to make this slow access twice
@@ -1430,23 +1506,24 @@ ENTRY(dispatch_privop_fault)
 
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
-       srlz.i                                  // guarantee that interruption 
collection is on
-       ;;
-(p15)  ssm psr.i                               // restore psr.i
-       adds r3=8,r2                            // set up second base pointer
+       srlz.i                          // guarantee that interruption 
+                                       //   collection is on
+       ;;
+(p15)  ssm psr.i                       // restore psr.i
+       adds r3=8,r2                    // set up second base pointer
        ;;
        SAVE_REST
        movl r14=ia64_leave_kernel
        ;;
        mov rp=r14
-//     br.sptk.many ia64_prepare_handle_privop
-     br.call.sptk.many b6=ia64_handle_privop
+//     br.sptk.many ia64_prepare_handle_privop  // TODO: why commented out?
+       br.call.sptk.many b6=ia64_handle_privop
 END(dispatch_privop_fault)
 #endif
 
 
        .org ia64_ivt+0x4400
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x4400 Entry 17 (size 64 bundles) Reserved
        DBG_FAULT(17)
        FAULT(17)
@@ -1455,77 +1532,80 @@ ENTRY(non_syscall)
 ENTRY(non_syscall)
        SAVE_MIN_WITH_COVER
 
-       // There is no particular reason for this code to be here, other than 
that
-       // there happens to be space here that would go unused otherwise.  If 
this
-       // fault ever gets "unreserved", simply moved the following code to a 
more
-       // suitable spot...
+       // There is no particular reason for this code to be here, other
+       // than that there happens to be space here that would go unused 
+       // otherwise.  If this fault ever gets "unreserved", simply move
+       // the following code to a more suitable spot...
 
        alloc r14=ar.pfs,0,0,2,0
        mov out0=cr.iim
        add out1=16,sp
-       adds r3=8,r2                    // set up second base pointer for 
SAVE_REST
+       adds r3=8,r2            // set up second base pointer for SAVE_REST
 
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
-       srlz.i                          // guarantee that interruption 
collection is on
-       ;;
-(p15)  ssm psr.i                       // restore psr.i
+       srlz.i                  // guarantee that interruption collection is on
+       ;;
+(p15)  ssm psr.i               // restore psr.i
        movl r15=ia64_leave_kernel
        ;;
        SAVE_REST
        mov rp=r15
        ;;
-       br.call.sptk.many b6=ia64_bad_break     // avoid WAW on CFM and ignore 
return addr
+       br.call.sptk.many b6=ia64_bad_break     // avoid WAW on CFM and 
+                                               //   ignore return addr
 END(non_syscall)
 #endif
 
        .org ia64_ivt+0x4800
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x4800 Entry 18 (size 64 bundles) Reserved
        DBG_FAULT(18)
        FAULT(18)
 
 #ifndef XEN
        /*
-        * There is no particular reason for this code to be here, other than 
that
-        * there happens to be space here that would go unused otherwise.  If 
this
-        * fault ever gets "unreserved", simply moved the following code to a 
more
-        * suitable spot...
+        * There is no particular reason for this code to be here, other
+        * than that there happens to be space here that would go unused 
+        * otherwise.  If this fault ever gets "unreserved", simply move
+        * the following code to a more suitable spot...
         */
 ENTRY(dispatch_unaligned_handler)
        SAVE_MIN_WITH_COVER
        ;;
-       alloc r14=ar.pfs,0,0,2,0                // now it's safe (must be first 
in insn group!)
+       alloc r14=ar.pfs,0,0,2,0        // now it's safe (must be first in
+                                       //   insn group!)
        mov out0=cr.ifa
        adds out1=16,sp
 
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
-       srlz.i                                  // guarantee that interruption 
collection is on
-       ;;
-(p15)  ssm psr.i                               // restore psr.i
-       adds r3=8,r2                            // set up second base pointer
+       srlz.i                          // guarantee that interruption 
+                                       //   collection is on
+       ;;
+(p15)  ssm psr.i                       // restore psr.i
+       adds r3=8,r2                    // set up second base pointer
        ;;
        SAVE_REST
        movl r14=ia64_leave_kernel
        ;;
        mov rp=r14
-//     br.sptk.many ia64_prepare_handle_unaligned
-    br.call.sptk.many b6=ia64_handle_unaligned
+//     br.sptk.many ia64_prepare_handle_unaligned // TODO: why commented out?
+       br.call.sptk.many b6=ia64_handle_unaligned
 END(dispatch_unaligned_handler)
 #endif
 
        .org ia64_ivt+0x4c00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x4c00 Entry 19 (size 64 bundles) Reserved
        DBG_FAULT(19)
        FAULT(19)
 
        /*
-        * There is no particular reason for this code to be here, other than 
that
-        * there happens to be space here that would go unused otherwise.  If 
this
-        * fault ever gets "unreserved", simply moved the following code to a 
more
-        * suitable spot...
+        * There is no particular reason for this code to be here, other 
+        * than that there happens to be space here that would go unused 
+        * otherwise.  If this fault ever gets "unreserved", simply move
+        * the following code to a more suitable spot...
         */
 
 GLOBAL_ENTRY(dispatch_to_fault_handler)
@@ -1545,10 +1625,12 @@ GLOBAL_ENTRY(dispatch_to_fault_handler)
        ;;
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
-       srlz.i                                  // guarantee that interruption 
collection is on
-       ;;
-(p15)  ssm psr.i                               // restore psr.i
-       adds r3=8,r2                            // set up second base pointer 
for SAVE_REST
+       srlz.i                          // guarantee that interruption 
+                                       //   collection is on
+       ;;
+(p15)  ssm psr.i                       // restore psr.i
+       adds r3=8,r2                    // set up second base pointer for
+                                       //   SAVE_REST
        ;;
        SAVE_REST
        movl r14=ia64_leave_kernel
@@ -1562,7 +1644,7 @@ END(dispatch_to_fault_handler)
 //
 
        .org ia64_ivt+0x5000
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
 ENTRY(page_not_present)
        DBG_FAULT(20)
@@ -1572,8 +1654,9 @@ ENTRY(page_not_present)
        mov r16=cr.ifa
        rsm psr.dt
        /*
-        * The Linux page fault handler doesn't expect non-present pages to be 
in
-        * the TLB.  Flush the existing entry now, so we meet that expectation.
+        * The Linux page fault handler doesn't expect non-present pages
+        * to be in the TLB.  Flush the existing entry now, so we meet 
+        * that expectation.
         */
        mov r17=PAGE_SHIFT<<2
        ;;
@@ -1586,7 +1669,7 @@ END(page_not_present)
 END(page_not_present)
 
        .org ia64_ivt+0x5100
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
 ENTRY(key_permission)
        DBG_FAULT(21)
@@ -1603,7 +1686,7 @@ END(key_permission)
 END(key_permission)
 
        .org ia64_ivt+0x5200
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
 ENTRY(iaccess_rights)
        DBG_FAULT(22)
@@ -1620,17 +1703,19 @@ END(iaccess_rights)
 END(iaccess_rights)
 
        .org ia64_ivt+0x5300
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
 ENTRY(daccess_rights)
        DBG_FAULT(23)
 #ifdef XEN
-       mov r31=pr;
+       mov r31=pr
+       ;;
        mov r16=cr.isr
        mov r17=cr.ifa
        mov r19=23
        movl r20=0x5300
-       br.sptk.many fast_access_reflect;;
+       br.sptk.many fast_access_reflect
+       ;;
 #else
        mov r16=cr.ifa
        rsm psr.dt
@@ -1642,7 +1727,7 @@ END(daccess_rights)
 END(daccess_rights)
 
        .org ia64_ivt+0x5400
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
 ENTRY(general_exception)
        DBG_FAULT(24)
@@ -1662,12 +1747,12 @@ END(general_exception)
 END(general_exception)
 
        .org ia64_ivt+0x5500
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
 ENTRY(disabled_fp_reg)
        DBG_FAULT(25)
 #ifdef XEN
-#if 0
+#if 0                          // TODO: can this be removed?
        mov r20=pr
        movl r16=0x2000000000000000
        movl r17=0x2000000000176b60
@@ -1686,7 +1771,7 @@ ENTRY(disabled_fp_reg)
        ;;
 #endif
        FAULT_OR_REFLECT(25)
-//floating_panic:
+//floating_panic:              // TODO: can this be removed?
 //     br.sptk.many floating_panic
        ;;
 #endif
@@ -1699,7 +1784,7 @@ END(disabled_fp_reg)
 END(disabled_fp_reg)
 
        .org ia64_ivt+0x5600
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
 ENTRY(nat_consumption)
        DBG_FAULT(26)
@@ -1711,7 +1796,7 @@ END(nat_consumption)
 END(nat_consumption)
 
        .org ia64_ivt+0x5700
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
 ENTRY(speculation_vector)
        DBG_FAULT(27)
@@ -1720,12 +1805,13 @@ ENTRY(speculation_vector)
        FAULT_OR_REFLECT(27)
 #else
        /*
-        * A [f]chk.[as] instruction needs to take the branch to the recovery 
code but
-        * this part of the architecture is not implemented in hardware on some 
CPUs, such
-        * as Itanium.  Thus, in general we need to emulate the behavior.  IIM 
contains
-        * the relative target (not yet sign extended).  So after sign 
extending it we
-        * simply add it to IIP.  We also need to reset the EI field of the 
IPSR to zero,
-        * i.e., the slot to restart into.
+        * A [f]chk.[as] instruction needs to take the branch to the
+        * recovery code but this part of the architecture is not 
+        * implemented in hardware on some CPUs, such as Itanium.  Thus,
+        * in general we need to emulate the behavior.  IIM contains the
+        * relative target (not yet sign extended).  So after sign extending 
+        * it we simply add it to IIP.  We also need to reset the EI field
+        * of the IPSR to zero, i.e., the slot to restart into.
         *
         * cr.imm contains zero_ext(imm21)
         */
@@ -1753,13 +1839,13 @@ END(speculation_vector)
 END(speculation_vector)
 
        .org ia64_ivt+0x5800
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5800 Entry 28 (size 16 bundles) Reserved
        DBG_FAULT(28)
        FAULT(28)
 
        .org ia64_ivt+0x5900
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
 ENTRY(debug_vector)
        DBG_FAULT(29)
@@ -1771,7 +1857,7 @@ END(debug_vector)
 END(debug_vector)
 
        .org ia64_ivt+0x5a00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
 ENTRY(unaligned_access)
        DBG_FAULT(30)
@@ -1786,7 +1872,7 @@ END(unaligned_access)
 END(unaligned_access)
 
        .org ia64_ivt+0x5b00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
 ENTRY(unsupported_data_reference)
        DBG_FAULT(31)
@@ -1798,7 +1884,7 @@ END(unsupported_data_reference)
 END(unsupported_data_reference)
 
        .org ia64_ivt+0x5c00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
 ENTRY(floating_point_fault)
        DBG_FAULT(32)
@@ -1810,7 +1896,7 @@ END(floating_point_fault)
 END(floating_point_fault)
 
        .org ia64_ivt+0x5d00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
 ENTRY(floating_point_trap)
        DBG_FAULT(33)
@@ -1822,7 +1908,7 @@ END(floating_point_trap)
 END(floating_point_trap)
 
        .org ia64_ivt+0x5e00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
 ENTRY(lower_privilege_trap)
        DBG_FAULT(34)
@@ -1834,7 +1920,7 @@ END(lower_privilege_trap)
 END(lower_privilege_trap)
 
        .org ia64_ivt+0x5f00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
 ENTRY(taken_branch_trap)
        DBG_FAULT(35)
@@ -1846,7 +1932,7 @@ END(taken_branch_trap)
 END(taken_branch_trap)
 
        .org ia64_ivt+0x6000
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
 ENTRY(single_step_trap)
        DBG_FAULT(36)
@@ -1858,56 +1944,58 @@ END(single_step_trap)
 END(single_step_trap)
 
        .org ia64_ivt+0x6100
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6100 Entry 37 (size 16 bundles) Reserved
        DBG_FAULT(37)
        FAULT(37)
 
        .org ia64_ivt+0x6200
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6200 Entry 38 (size 16 bundles) Reserved
        DBG_FAULT(38)
        FAULT(38)
 
        .org ia64_ivt+0x6300
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6300 Entry 39 (size 16 bundles) Reserved
        DBG_FAULT(39)
        FAULT(39)
 
        .org ia64_ivt+0x6400
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6400 Entry 40 (size 16 bundles) Reserved
        DBG_FAULT(40)
        FAULT(40)
 
        .org ia64_ivt+0x6500
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6500 Entry 41 (size 16 bundles) Reserved
        DBG_FAULT(41)
        FAULT(41)
 
        .org ia64_ivt+0x6600
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6600 Entry 42 (size 16 bundles) Reserved
        DBG_FAULT(42)
        FAULT(42)
 
        .org ia64_ivt+0x6700
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6700 Entry 43 (size 16 bundles) Reserved
        DBG_FAULT(43)
        FAULT(43)
 
        .org ia64_ivt+0x6800
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6800 Entry 44 (size 16 bundles) Reserved
        DBG_FAULT(44)
        FAULT(44)
 
        .org ia64_ivt+0x6900
-/////////////////////////////////////////////////////////////////////////////////////////
-// 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception 
(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
+//////////////////////////////////////////////////////////////////////////
+// 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception (17,18,29,41,42,43,
+//                                                    44,58,60,61,62,72,
+//                                                    73,75,76,77)
 ENTRY(ia32_exception)
        DBG_FAULT(45)
 #ifdef XEN
@@ -1918,7 +2006,7 @@ END(ia32_exception)
 END(ia32_exception)
 
        .org ia64_ivt+0x6a00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
 ENTRY(ia32_intercept)
        DBG_FAULT(46)
@@ -1952,7 +2040,7 @@ END(ia32_intercept)
 END(ia32_intercept)
 
        .org ia64_ivt+0x6b00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
 ENTRY(ia32_interrupt)
        DBG_FAULT(47)
@@ -1969,121 +2057,121 @@ END(ia32_interrupt)
 END(ia32_interrupt)
 
        .org ia64_ivt+0x6c00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6c00 Entry 48 (size 16 bundles) Reserved
        DBG_FAULT(48)
        FAULT(48)
 
        .org ia64_ivt+0x6d00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6d00 Entry 49 (size 16 bundles) Reserved
        DBG_FAULT(49)
        FAULT(49)
 
        .org ia64_ivt+0x6e00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6e00 Entry 50 (size 16 bundles) Reserved
        DBG_FAULT(50)
        FAULT(50)
 
        .org ia64_ivt+0x6f00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x6f00 Entry 51 (size 16 bundles) Reserved
        DBG_FAULT(51)
        FAULT(51)
 
        .org ia64_ivt+0x7000
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7000 Entry 52 (size 16 bundles) Reserved
        DBG_FAULT(52)
        FAULT(52)
 
        .org ia64_ivt+0x7100
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7100 Entry 53 (size 16 bundles) Reserved
        DBG_FAULT(53)
        FAULT(53)
 
        .org ia64_ivt+0x7200
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7200 Entry 54 (size 16 bundles) Reserved
        DBG_FAULT(54)
        FAULT(54)
 
        .org ia64_ivt+0x7300
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7300 Entry 55 (size 16 bundles) Reserved
        DBG_FAULT(55)
        FAULT(55)
 
        .org ia64_ivt+0x7400
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7400 Entry 56 (size 16 bundles) Reserved
        DBG_FAULT(56)
        FAULT(56)
 
        .org ia64_ivt+0x7500
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7500 Entry 57 (size 16 bundles) Reserved
        DBG_FAULT(57)
        FAULT(57)
 
        .org ia64_ivt+0x7600
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7600 Entry 58 (size 16 bundles) Reserved
        DBG_FAULT(58)
        FAULT(58)
 
        .org ia64_ivt+0x7700
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7700 Entry 59 (size 16 bundles) Reserved
        DBG_FAULT(59)
        FAULT(59)
 
        .org ia64_ivt+0x7800
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7800 Entry 60 (size 16 bundles) Reserved
        DBG_FAULT(60)
        FAULT(60)
 
        .org ia64_ivt+0x7900
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7900 Entry 61 (size 16 bundles) Reserved
        DBG_FAULT(61)
        FAULT(61)
 
        .org ia64_ivt+0x7a00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7a00 Entry 62 (size 16 bundles) Reserved
        DBG_FAULT(62)
        FAULT(62)
 
        .org ia64_ivt+0x7b00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7b00 Entry 63 (size 16 bundles) Reserved
        DBG_FAULT(63)
        FAULT(63)
 
        .org ia64_ivt+0x7c00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7c00 Entry 64 (size 16 bundles) Reserved
        DBG_FAULT(64)
        FAULT(64)
 
        .org ia64_ivt+0x7d00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7d00 Entry 65 (size 16 bundles) Reserved
        DBG_FAULT(65)
        FAULT(65)
 
        .org ia64_ivt+0x7e00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7e00 Entry 66 (size 16 bundles) Reserved
        DBG_FAULT(66)
        FAULT(66)
 
        .org ia64_ivt+0x7f00
-/////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
 // 0x7f00 Entry 67 (size 16 bundles) Reserved
        DBG_FAULT(67)
        FAULT(67)
@@ -2104,21 +2192,22 @@ GLOBAL_ENTRY(dispatch_reflection)
        adds out1=16,sp
        mov out2=cr.isr
        mov out3=cr.iim
-//     mov out3=cr.itir
+//     mov out3=cr.itir                // TODO: why commented out?
 
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
-       srlz.i                                  // guarantee that interruption 
collection is on
-       ;;
-(p15)  ssm psr.i                               // restore psr.i
-       adds r3=8,r2                            // set up second base pointer
+       srlz.i                          // guarantee that interruption 
+                                       //   collection is on
+       ;;
+(p15)  ssm psr.i                       // restore psr.i
+       adds r3=8,r2                    // set up second base pointer
        ;;
        SAVE_REST
        movl r14=ia64_leave_kernel
        ;;
        mov rp=r14
-//     br.sptk.many ia64_prepare_handle_reflection
-    br.call.sptk.many b6=ia64_handle_reflection
+//     br.sptk.many ia64_prepare_handle_reflection // TODO: why commented out?
+       br.call.sptk.many b6=ia64_handle_reflection
 END(dispatch_reflection)
 
 #define SAVE_MIN_COVER_DONE    DO_SAVE_MIN(,mov r30=cr.ifs,)
@@ -2134,10 +2223,10 @@ END(dispatch_slow_hyperprivop)
 #ifdef CONFIG_IA32_SUPPORT
 
        /*
-        * There is no particular reason for this code to be here, other than 
that
-        * there happens to be space here that would go unused otherwise.  If 
this
-        * fault ever gets "unreserved", simply moved the following code to a 
more
-        * suitable spot...
+        * There is no particular reason for this code to be here, other 
+        * than that there happens to be space here that would go unused 
+        * otherwise.  If this fault ever gets "unreserved", simply move
+        * the following code to a more suitable spot...
         */
 
        // IA32 interrupt entry point
@@ -2148,7 +2237,7 @@ ENTRY(dispatch_to_ia32_handler)
        mov r14=cr.isr
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
-       srlz.i                                  // guarantee that interruption 
collection is on
+       srlz.i                  // guarantee that interruption collection is on
        ;;
 (p15)  ssm psr.i
        adds r3=8,r2            // Base pointer for SAVE_REST
@@ -2161,15 +2250,17 @@ ENTRY(dispatch_to_ia32_handler)
        cmp.ne p6,p0=r14,r15
 (p6)   br.call.dpnt.many b6=non_ia32_syscall
 
-       adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW 
conventions
+       adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW 
+                                               //   conventions
        adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
        ;;
        cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
        ld8 r8=[r14]            // get r8
        ;;
-       st8 [r15]=r8            // save original EAX in r1 (IA32 procs don't 
use the GP)
-       ;;
-       alloc r15=ar.pfs,0,0,6,0        // must first in an insn group
+       st8 [r15]=r8            // save original EAX in r1 (IA32 procs 
+                               //   don't use the GP)
+       ;;
+       alloc r15=ar.pfs,0,0,6,0        // must be first in an insn group
        ;;
        ld4 r8=[r14],8          // r8 == eax (syscall number)
        mov r15=IA32_NR_syscalls
@@ -2208,7 +2299,7 @@ non_ia32_syscall:
        alloc r15=ar.pfs,0,0,2,0
        mov out0=r14                            // interrupt #
        add out1=16,sp                          // pointer to pt_regs
-       ;;                      // avoid WAW on CFM
+       ;;                                      // avoid WAW on CFM
        br.call.sptk.many rp=ia32_bad_interrupt
 .ret1: movl r15=ia64_leave_kernel
        ;;
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/mm.c    Wed Aug 30 22:36:18 2006 +0100
@@ -166,7 +166,8 @@
 #include <xen/sched.h>
 #include <xen/domain.h>
 #include <asm/xentypes.h>
-#include <asm/mm.h>
+#include <xen/mm.h>
+#include <xen/errno.h>
 #include <asm/pgalloc.h>
 #include <asm/vhpt.h>
 #include <asm/vcpu.h>
@@ -948,8 +949,6 @@ efi_mmio(unsigned long physaddr, unsigne
                 return 1;
             }
 
-            DPRINTK("%s:%d physaddr 0x%lx size = 0x%lx\n",
-                    __func__, __LINE__, physaddr, size);
             return 0;
         }
 
@@ -970,8 +969,10 @@ assign_domain_mmio_page(struct domain *d
                 __func__, d, mpaddr, size);
     }
     if (!efi_mmio(mpaddr, size)) {
+#ifndef NDEBUG
         DPRINTK("%s:%d domain %p mpaddr 0x%lx size = 0x%lx\n",
                 __func__, __LINE__, d, mpaddr, size);
+#endif
         return -EINVAL;
     }
     assign_domain_same_page(d, mpaddr, size, ASSIGN_writable | ASSIGN_nocache);
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/pcdp.c
--- a/xen/arch/ia64/xen/pcdp.c  Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/pcdp.c  Wed Aug 30 22:36:18 2006 +0100
@@ -18,6 +18,7 @@
 #ifdef XEN
 #include <linux/efi.h>
 #include <linux/errno.h>
+#include <asm/io.h>
 #include <asm/iosapic.h>
 #include <asm/system.h>
 #include <acpi/acpi.h>
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/privop.c
--- a/xen/arch/ia64/xen/privop.c        Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/privop.c        Wed Aug 30 22:36:18 2006 +0100
@@ -13,7 +13,7 @@
 #include <asm/dom_fw.h>
 #include <asm/vhpt.h>
 #include <asm/bundle.h>
-#include <asm/privop_stat.h>
+#include <xen/perfc.h>
 
 long priv_verbose=0;
 unsigned long privop_trace = 0;
@@ -682,7 +682,7 @@ priv_emulate(VCPU *vcpu, REGS *regs, UIN
                return IA64_ILLOP_FAULT;
        }
        //if (isrcode != 1 && isrcode != 2) return 0;
-       privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
+       privlvl = ia64_get_cpl(ipsr);
        // its OK for a privified-cover to be executed in user-land
        fault = priv_handle_op(vcpu,regs,privlvl);
        if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) { // 
success!!
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/privop_stat.c
--- a/xen/arch/ia64/xen/privop_stat.c   Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/privop_stat.c   Wed Aug 30 22:36:18 2006 +0100
@@ -1,26 +1,55 @@
+#include <xen/lib.h>
+#include <public/xen.h>
+#include <xen/perfc.h>
+#include <asm/atomic.h>
 #include <asm/privop_stat.h>
-#include <asm/vhpt.h>
-#include <xen/lib.h>
-#include <asm/uaccess.h>
 
-#ifdef PRIVOP_ADDR_COUNT
-#define PRIVOP_COUNT_NINSTS 2
-#define PRIVOP_COUNT_NADDRS 30
+#ifdef CONFIG_PRIVOP_ADDRS
 
 struct privop_addr_count {
-       const char *instname;
        unsigned long addr[PRIVOP_COUNT_NADDRS];
-       unsigned long count[PRIVOP_COUNT_NADDRS];
-       unsigned long overflow;
+       unsigned int count[PRIVOP_COUNT_NADDRS];
+       unsigned int overflow;
+       atomic_t *perfc_addr;
+       atomic_t *perfc_count;
+       atomic_t *perfc_overflow;
 };
 
+#undef  PERFCOUNTER
+#define PERFCOUNTER(var, name)
 
-static struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS] = {
-       [_GET_IFA] = { "=ifa",  { 0 }, { 0 }, 0 },
-       [_THASH] = { "thash", { 0 }, { 0 }, 0 }
+#undef  PERFCOUNTER_CPU
+#define PERFCOUNTER_CPU(var, name)
+
+#undef  PERFCOUNTER_ARRAY
+#define PERFCOUNTER_ARRAY(var, name, size)
+
+#undef  PERFSTATUS
+#define PERFSTATUS(var, name)
+
+#undef  PERFSTATUS_CPU
+#define PERFSTATUS_CPU(var, name)
+
+#undef  PERFSTATUS_ARRAY
+#define PERFSTATUS_ARRAY(var, name, size)
+
+#undef PERFPRIVOPADDR
+#define PERFPRIVOPADDR(name)                        \
+    {                                               \
+        { 0 }, { 0 }, 0,                            \
+        perfcounters.privop_addr_##name##_addr,     \
+        perfcounters.privop_addr_##name##_count,    \
+        perfcounters.privop_addr_##name##_overflow  \
+    },
+
+static struct privop_addr_count privop_addr_counter[] = {
+#include <asm/perfc_defn.h>
 };
 
-void privop_count_addr(unsigned long iip, int inst)
+#define PRIVOP_COUNT_NINSTS \
+        (sizeof(privop_addr_counter) / sizeof(privop_addr_counter[0]))
+
+void privop_count_addr(unsigned long iip, enum privop_inst inst)
 {
        struct privop_addr_count *v = &privop_addr_counter[inst];
        int i;
@@ -41,29 +70,28 @@ void privop_count_addr(unsigned long iip
        v->overflow++;;
 }
 
-static int dump_privop_addrs(char *buf)
+void gather_privop_addrs(void)
 {
        int i, j;
-       char *s = buf;
-       s += sprintf(s, "Privop addresses:\n");
+       atomic_t *v;
        for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
-               struct privop_addr_count *v = &privop_addr_counter[i];
-               s += sprintf(s, "%s:\n", v->instname);
-               for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) {
-                       if (!v->addr[j])
-                               break;
-                       s += sprintf(s, " at 0x%lx #%ld\n",
-                                    v->addr[j], v->count[j]);
-               }
-               if (v->overflow) 
-                       s += sprintf(s, " other #%ld\n", v->overflow);
+               /* Note: addresses are truncated!  */
+               v = privop_addr_counter[i].perfc_addr;
+               for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
+                       atomic_set(&v[j], privop_addr_counter[i].addr[j]);
+
+               v = privop_addr_counter[i].perfc_count;
+               for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
+                       atomic_set(&v[j], privop_addr_counter[i].count[j]);
+               
+               atomic_set(privop_addr_counter[i].perfc_overflow,
+                          privop_addr_counter[i].overflow);
        }
-       return s - buf;
 }
 
-static void zero_privop_addrs(void)
+void reset_privop_addrs(void)
 {
-       int i,j;
+       int i, j;
        for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
                struct privop_addr_count *v = &privop_addr_counter[i];
                for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
@@ -119,30 +147,3 @@ static const char * const hyperpriv_str[
        "=rr", "rr=", "kr=", "fc", "=cpuid", "=pmd", "=ar.eflg", "ar.eflg="
 };
 #endif
-
-#define TMPBUFLEN 8*1024
-int dump_privop_counts_to_user(char __user *ubuf, int len)
-{
-       char buf[TMPBUFLEN];
-       int n;
-
-       if (len < TMPBUFLEN)
-               return -1;
-
-       n = 0;
-#ifdef PRIVOP_ADDR_COUNT
-       n += dump_privop_addrs(buf + n);
-#endif
-       n += dump_vhpt_stats(buf + n);
-       if (__copy_to_user(ubuf,buf,n))
-               return -1;
-       return n;
-}
-
-int zero_privop_counts_to_user(char __user *ubuf, int len)
-{
-#ifdef PRIVOP_ADDR_COUNT
-       zero_privop_addrs();
-#endif
-       return 0;
-}
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/vcpu.c  Wed Aug 30 22:36:18 2006 +0100
@@ -8,6 +8,7 @@
 
 #include <linux/sched.h>
 #include <public/xen.h>
+#include <xen/mm.h>
 #include <asm/ia64_int.h>
 #include <asm/vcpu.h>
 #include <asm/regionreg.h>
@@ -22,6 +23,7 @@
 #include <asm/vmx_phy_mode.h>
 #include <asm/bundle.h>
 #include <asm/privop_stat.h>
+#include <asm/uaccess.h>
 
 /* FIXME: where these declarations should be there ? */
 extern void getreg(unsigned long regnum, unsigned long *val, int *nat, struct 
pt_regs *regs);
@@ -473,7 +475,7 @@ IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT6
 
 IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
 {
-       PRIVOP_COUNT_ADDR(vcpu_regs(vcpu),_GET_IFA);
+       PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_get_ifa);
        *pval = PSCB(vcpu,ifa);
        return (IA64_NO_FAULT);
 }
@@ -540,7 +542,7 @@ IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT6
 
 IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
 {
-       PRIVOP_COUNT_ADDR(vcpu_regs(vcpu),_THASH);
+       PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_thash);
        *pval = PSCB(vcpu,iha);
        return (IA64_NO_FAULT);
 }
@@ -2215,3 +2217,28 @@ IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 v
 
        return IA64_NO_FAULT;
 }
+
+int ia64_map_hypercall_param(void)
+{
+       struct vcpu *v = current;
+       struct domain *d = current->domain;
+       u64 vaddr = v->arch.hypercall_param.va & PAGE_MASK;
+       volatile pte_t* pte;
+
+       if (v->arch.hypercall_param.va == 0)
+               return FALSE;
+       pte = lookup_noalloc_domain_pte(d, v->arch.hypercall_param.pa1);
+       if (!pte || !pte_present(*pte))
+               return FALSE;
+       vcpu_itc_no_srlz(v, 2, vaddr, pte_val(*pte), -1UL, PAGE_SHIFT);
+       if (v->arch.hypercall_param.pa2) {
+               vaddr += PAGE_SIZE;
+               pte = lookup_noalloc_domain_pte(d, v->arch.hypercall_param.pa2);
+               if (pte && pte_present(*pte)) {
+                       vcpu_itc_no_srlz(v, 2, vaddr, pte_val(*pte),
+                                        -1UL, PAGE_SHIFT);
+               }
+       }
+       ia64_srlz_d();
+       return TRUE;
+}
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/vhpt.c  Wed Aug 30 22:36:18 2006 +0100
@@ -261,13 +261,12 @@ void flush_tlb_mask(cpumask_t mask)
             (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
 }
 
-int dump_vhpt_stats(char *buf)
+#ifdef PERF_COUNTERS
+void gather_vhpt_stats(void)
 {
        int i, cpu;
-       char *s = buf;
-
-       s += sprintf(s,"VHPT usage (%ld entries):\n",
-                    (unsigned long) VHPT_NUM_ENTRIES);
+
+       perfc_set(vhpt_nbr_entries, VHPT_NUM_ENTRIES);
 
        for_each_present_cpu (cpu) {
                struct vhpt_lf_entry *v = __va(per_cpu(vhpt_paddr, cpu));
@@ -276,8 +275,7 @@ int dump_vhpt_stats(char *buf)
                for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
                        if (!(v->ti_tag & INVALID_TI_TAG))
                                vhpt_valid++;
-               s += sprintf(s,"  cpu %d: %ld\n", cpu, vhpt_valid);
-       }
-
-       return s - buf;
-}
+               perfc_seta(vhpt_valid_entries, cpu, vhpt_valid);
+       }
+}
+#endif
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/xen.lds.S
--- a/xen/arch/ia64/xen/xen.lds.S       Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/xen.lds.S       Wed Aug 30 22:36:18 2006 +0100
@@ -169,7 +169,9 @@ SECTIONS
          *(.data.gate)
          __stop_gate_section = .;
        }
-  . = ALIGN(PAGE_SIZE);                /* make sure the gate page doesn't 
expose kernel data */
+  . = ALIGN(PAGE_SIZE);                /* make sure the gate page doesn't 
expose
+                                * kernel data
+                                */
 
   .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
         { *(.data.cacheline_aligned) }
@@ -184,7 +186,9 @@ SECTIONS
                *(.data.percpu)
                __per_cpu_end = .;
        }
-  . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into 
percpu page size */
+  . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
+                                                * into percpu page size
+                                                */
 
   data : { } :data
   .data : AT(ADDR(.data) - LOAD_OFFSET)
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/xenasm.S
--- a/xen/arch/ia64/xen/xenasm.S        Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/xenasm.S        Wed Aug 30 22:36:18 2006 +0100
@@ -31,7 +31,7 @@
 //  loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
 //  loc5=pal_vaddr, loc6=xen_paddr, loc7=shared_archinfo_paddr,
 GLOBAL_ENTRY(ia64_new_rr7)
-       // not sure this unwind statement is correct...
+       // FIXME? not sure this unwind statement is correct...
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
        alloc loc1 = ar.pfs, 5, 8, 0, 0
        movl loc2=PERCPU_ADDR
@@ -162,7 +162,7 @@ 1:
        dep r25=0,loc5,60,4             // convert pal vaddr to paddr
        ;;
        ptr.i   loc5,r23
-       or r25=r25,r26          // construct PA | page properties
+       or r25=r25,r26                  // construct PA | page properties
        mov cr.itir=r23
        mov cr.ifa=loc5
        ;;
@@ -191,10 +191,10 @@ GLOBAL_ENTRY(ia64_prepare_handle_privop)
         */
        mov r16=r0
        DO_SAVE_SWITCH_STACK
-       br.call.sptk.many rp=ia64_handle_privop         // stack frame setup in 
ivt
+       br.call.sptk.many rp=ia64_handle_privop // stack frame setup in ivt
 .ret22:        .body
        DO_LOAD_SWITCH_STACK
-       br.cond.sptk.many rp                            // goes to 
ia64_leave_kernel
+       br.cond.sptk.many rp                    // goes to ia64_leave_kernel
 END(ia64_prepare_handle_privop)
 
 GLOBAL_ENTRY(ia64_prepare_handle_break)
@@ -217,7 +217,7 @@ GLOBAL_ENTRY(ia64_prepare_handle_reflect
         */
        mov r16=r0
        DO_SAVE_SWITCH_STACK
-       br.call.sptk.many rp=ia64_handle_reflection     // stack frame setup in 
ivt
+       br.call.sptk.many rp=ia64_handle_reflection // stack frame setup in ivt
 .ret24:        .body
        DO_LOAD_SWITCH_STACK
        br.cond.sptk.many rp                    // goes to ia64_leave_kernel
@@ -301,7 +301,7 @@ 1:  cmp.eq p7,p8=1,r32              /* PAL_CACHE_FLUS
 (p8)   br.cond.sptk.few 1f
 #if 0
        mov r9=ar.lc
-       movl r8=524288                  /* flush 512k million cache lines 
(16MB) */
+       movl r8=524288          /* flush 512k million cache lines (16MB) */
        ;;
        mov ar.lc=r8
        movl r8=0xe000000000000000
@@ -319,7 +319,9 @@ 1:  cmp.eq p7,p8=15,r32             /* PAL_PERF_MON_
 1:     cmp.eq p7,p8=15,r32             /* PAL_PERF_MON_INFO */
 (p8)   br.cond.sptk.few 1f
        mov r8=0                        /* status = 0 */
-       movl r9 =0x08122f04             /* generic=4 width=47 retired=8 
cycles=18 */
+       movl r9 =0x08122f04             /* generic=4 width=47 retired=8 
+                                        * cycles=18
+                                        */
        mov r10=0                       /* reserved */
        mov r11=0                       /* reserved */
        mov r16=0xffff                  /* implemented PMC */
@@ -361,8 +363,8 @@ END(pal_emulator_static)
 END(pal_emulator_static)
 
 //  These instructions are copied in the domains.
-//  This is the virtual PAL, which simply does an hypercall.
-//  The size is 2 bunldes (32 Bytes).  It handles both static and stacked
+//  This is the virtual PAL, which simply does a hypercall.
+//  The size is 2 bundles (32 Bytes).  It handles both static and stacked
 //    convention.
 //  If you modify this code, you have to modify dom_fw.h (for the size) and
 //   dom_fw_pal_hypercall_patch.
@@ -376,7 +378,7 @@ GLOBAL_ENTRY(pal_call_stub)
        }
        {
         .mbb
-       break 0x1000    //  Hypercall vector (Value is patched).
+       break 0x1000            //  Hypercall vector (Value is patched).
 (p7)   br.cond.sptk.few rp
 (p8)   br.ret.sptk.few rp
        }
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c      Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/xensetup.c      Wed Aug 30 22:36:18 2006 +0100
@@ -26,11 +26,6 @@
 #include <linux/efi.h>
 #include <asm/iosapic.h>
 
-/* Be sure the struct shared_info size is <= XSI_SIZE.  */
-#if SHARED_INFO_SIZE > XSI_SIZE
-#error "struct shared_info bigger than XSI_SIZE"
-#endif
-
 unsigned long xenheap_phys_end, total_pages;
 
 char saved_command_line[COMMAND_LINE_SIZE];
@@ -258,6 +253,9 @@ void start_kernel(void)
     int i;
 #endif
 
+    /* Be sure the struct shared_info size is <= XSI_SIZE.  */
+    BUILD_BUG_ON(sizeof(struct shared_info) > XSI_SIZE);
+
     running_on_sim = is_platform_hp_ski();
     /* Kernel may be relocated by EFI loader */
     xen_pstart = ia64_tpa(KERNEL_START);
@@ -289,6 +287,7 @@ void start_kernel(void)
         ia64_boot_param->initrd_size = 0;
     }
 
+    printk("Xen command line: %s\n", saved_command_line);
     /* xenheap should be in same TR-covered range with xen image */
     xenheap_phys_end = xen_pstart + xenheap_size;
     printk("xen image pstart: 0x%lx, xenheap pend: 0x%lx\n",
@@ -518,9 +517,6 @@ printk("num_online_cpus=%d, max_cpus=%d\
                        0) != 0)
         panic("Could not set up DOM0 guest OS\n");
 
-    /* PIN domain0 on CPU 0.  */
-    dom0->vcpu[0]->cpu_affinity = cpumask_of_cpu(0);
-
     if (!running_on_sim)  // slow on ski and pages are pre-initialized to zero
        scrub_heap_pages();
 
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/ia64/xen/xentime.c
--- a/xen/arch/ia64/xen/xentime.c       Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/ia64/xen/xentime.c       Wed Aug 30 22:36:18 2006 +0100
@@ -109,6 +109,7 @@ xen_timer_interrupt (int irq, void *dev_
 xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 {
        unsigned long new_itm, old_itc;
+       int f_setitm = 0;
 
 #if 0
 #define HEARTBEAT_FREQ 16      // period in seconds
@@ -129,11 +130,12 @@ xen_timer_interrupt (int irq, void *dev_
                        vcpu_pend_timer(current);
                        // ensure another timer interrupt happens even if 
domain doesn't
                        vcpu_set_next_timer(current);
+                       f_setitm = 1;
                }
 
        new_itm = local_cpu_data->itm_next;
 
-       if (!VMX_DOMAIN(current) && !time_after(ia64_get_itc(), new_itm))
+       if (f_setitm && !time_after(ia64_get_itc(), new_itm)) 
                return;
 
        while (1) {
diff -r e01441c9a607 -r 50aea0ec406b xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c    Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/arch/x86/physdev.c    Wed Aug 30 22:36:18 2006 +0100
@@ -96,10 +96,11 @@ long do_physdev_op(int cmd, XEN_GUEST_HA
         if ( !IS_PRIV(current->domain) )
             break;
 
+        irq = irq_op.irq;
         ret = -EINVAL;
-        if ( (irq = irq_op.irq) >= NR_IRQS )
+        if ( (irq < 0) || (irq >= NR_IRQS) )
             break;
-        
+
         irq_op.vector = assign_irq_vector(irq);
         ret = copy_to_guest(arg, &irq_op, 1) ? -EFAULT : 0;
         break;
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h     Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/config.h     Wed Aug 30 22:36:18 2006 +0100
@@ -36,6 +36,12 @@
 #define supervisor_mode_kernel (0)
 
 #define MAX_DMADOM_PFN (0x7FFFFFFFUL >> PAGE_SHIFT) /* 31 addressable bits */
+
+/* If PERFC is used, include privop maps.  */
+#ifdef PERF_COUNTERS
+#define CONFIG_PRIVOP_ADDRS
+#define PRIVOP_COUNT_NADDRS 30
+#endif
 
 #define CONFIG_VGA 1
 
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/dom_fw.h
--- a/xen/include/asm-ia64/dom_fw.h     Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/dom_fw.h     Wed Aug 30 22:36:18 2006 +0100
@@ -7,19 +7,32 @@
 
 #include <linux/efi.h>
 
-#ifndef MB
-#define MB (1024*1024)
-#endif
+/* Portion of guest physical memory space reserved for PAL/SAL/EFI/ACPI
+   data and code.  */
+#define FW_BASE_PADDR          0x0000UL
+#define FW_END_PADDR           0x3000UL
 
 /* This is used to determined the portion of a domain's metaphysical memory
    space reserved for the hypercall patch table. */
-//FIXME: experiment with smaller sizes
-#define HYPERCALL_START        1UL*MB
-#define HYPERCALL_END  2UL*MB
+/* Map:
+   Index           Addr
+   0x0000-0x000f   0x0000-0x00ff  : unused
+   0x0010-0x001f   0x0100-0x01ff  : EFI
+   0x0080-0x008f   0x0800-0x08ff  : PAL/SAL
+   0x0090-0x009f   0x0900-0x09ff  : FPSWA
+*/
+#define        FW_HYPERCALL_BASE_PADDR 0x0000UL
+#define        FW_HYPERCALL_END_PADDR  0X1000UL
+#define        FW_HYPERCALL_PADDR(index) (FW_HYPERCALL_BASE_PADDR + (16UL * 
index))
 
-#define FW_HYPERCALL_BASE_PADDR HYPERCALL_START
-#define        FW_HYPERCALL_END_PADDR HYPERCALL_END
-#define        FW_HYPERCALL_PADDR(index) (FW_HYPERCALL_BASE_PADDR + (16UL * 
index))
+/* Base and end guest physical address of ACPI tables.  */
+#define FW_ACPI_BASE_PADDR     0x1000UL
+#define FW_ACPI_END_PADDR      0x2000UL
+
+/* Base and end guest physical address of EFI and SAL (non-ACPI) tables.  */
+#define FW_TABLES_BASE_PADDR   0x2000UL
+#define FW_TABLES_END_PADDR    0x3000UL
+
 
 /* Hypercalls number have a low part and a high part.
    The high part is the class (xen/pal/sal/efi).  */
@@ -91,16 +104,16 @@
  */
 
 /* these are indexes into the runtime services table */
-#define FW_HYPERCALL_EFI_GET_TIME_INDEX                        0UL
-#define FW_HYPERCALL_EFI_SET_TIME_INDEX                        1UL
-#define FW_HYPERCALL_EFI_GET_WAKEUP_TIME_INDEX         2UL
-#define FW_HYPERCALL_EFI_SET_WAKEUP_TIME_INDEX         3UL
-#define FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP_INDEX 4UL
-#define FW_HYPERCALL_EFI_GET_VARIABLE_INDEX            5UL
-#define FW_HYPERCALL_EFI_GET_NEXT_VARIABLE_INDEX       6UL
-#define FW_HYPERCALL_EFI_SET_VARIABLE_INDEX            7UL
-#define FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT_INDEX        8UL
-#define FW_HYPERCALL_EFI_RESET_SYSTEM_INDEX            9UL
+#define FW_HYPERCALL_EFI_GET_TIME_INDEX                        0x10UL
+#define FW_HYPERCALL_EFI_SET_TIME_INDEX                        0x11UL
+#define FW_HYPERCALL_EFI_GET_WAKEUP_TIME_INDEX         0x12UL
+#define FW_HYPERCALL_EFI_SET_WAKEUP_TIME_INDEX         0x13UL
+#define FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP_INDEX 0x14UL
+#define FW_HYPERCALL_EFI_GET_VARIABLE_INDEX            0x15UL
+#define FW_HYPERCALL_EFI_GET_NEXT_VARIABLE_INDEX       0x16UL
+#define FW_HYPERCALL_EFI_SET_VARIABLE_INDEX            0x17UL
+#define FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT_INDEX        0x18UL
+#define FW_HYPERCALL_EFI_RESET_SYSTEM_INDEX            0x19UL
 
 /* these are hypercall numbers */
 #define FW_HYPERCALL_EFI_CALL                          0x300UL
@@ -150,13 +163,10 @@
 
 /* Hypercalls index bellow _FIRST_ARCH are reserved by Xen, while those above
    are for the architecture.
-   Note: this limit was defined by Xen/ia64 (and not by Xen).²
+   Note: this limit was defined by Xen/ia64 (and not by Xen).
      This can be renumbered safely.
 */
 #define FW_HYPERCALL_FIRST_ARCH                0x300UL
-
-/* Xen/ia64 user hypercalls.  Only used for debugging.  */
-#define FW_HYPERCALL_FIRST_USER                0xff00UL
 
 /* Interrupt vector used for os boot rendez vous.  */
 #define XEN_SAL_BOOT_RENDEZ_VEC        0xF3
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/domain.h     Wed Aug 30 22:36:18 2006 +0100
@@ -118,8 +118,6 @@ struct arch_domain {
  
     /* Address of SAL emulator data  */
     struct xen_sal_data *sal_data;
-    /* SAL return point.  */
-    unsigned long sal_return_addr;
 
     /* Address of efi_runtime_services_t (placed in domain memory)  */
     void *efi_runtime;
@@ -137,10 +135,18 @@ struct arch_domain {
     atomic64_t shadow_fault_count;
 
     struct last_vcpu last_vcpu[NR_CPUS];
+
+    struct arch_vmx_domain arch_vmx; /* Virtual Machine Extensions */
 };
 #define INT_ENABLE_OFFSET(v)             \
     (sizeof(vcpu_info_t) * (v)->vcpu_id + \
     offsetof(vcpu_info_t, evtchn_upcall_mask))
+
+struct hypercall_param {
+    unsigned long va;
+    unsigned long pa1;
+    unsigned long pa2;
+};
 
 struct arch_vcpu {
     /* Save the state of vcpu.
@@ -185,10 +191,14 @@ struct arch_vcpu {
     char irq_new_pending;
     char irq_new_condition;    // vpsr.i/vtpr change, check for pending VHPI
     char hypercall_continuation;
+
+    struct hypercall_param hypercall_param;  // used to remap a hypercall param
+
     //for phycial  emulation
     unsigned long old_rsc;
     int mode_flags;
     fpswa_ret_t fpswa_ret;     /* save return values of FPSWA emulation */
+    struct timer hlt_timer;
     struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
 
 #define INVALID_PROCESSOR       INT_MAX
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/grant_table.h
--- a/xen/include/asm-ia64/grant_table.h        Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/grant_table.h        Wed Aug 30 22:36:18 2006 +0100
@@ -35,7 +35,7 @@ void guest_physmap_add_page(struct domai
                           gnttab_shared_maddr((d), (t), (i)));          \
         (IA64_GRANT_TABLE_PADDR >> PAGE_SHIFT) + (i);})
 
-#define gnttab_log_dirty(d, f) ((void)0)
+#define gnttab_mark_dirty(d, f) ((void)f)
 
 static inline void gnttab_clear_flag(unsigned long nr, uint16_t *addr)
 {
diff -r e01441c9a607 -r 50aea0ec406b 
xen/include/asm-ia64/linux-xen/asm/processor.h
--- a/xen/include/asm-ia64/linux-xen/asm/processor.h    Wed Aug 30 14:09:31 
2006 -0500
+++ b/xen/include/asm-ia64/linux-xen/asm/processor.h    Wed Aug 30 22:36:18 
2006 +0100
@@ -717,6 +717,14 @@ prefetchw (const void *x)
 
 extern unsigned long boot_option_idle_override;
 
+#ifdef XEN
+static inline unsigned int
+ia64_get_cpl(unsigned long psr)
+{
+  return (psr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_IA64_PROCESSOR_H */
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/linux-xen/linux/efi.h
--- a/xen/include/asm-ia64/linux-xen/linux/efi.h        Wed Aug 30 14:09:31 
2006 -0500
+++ b/xen/include/asm-ia64/linux-xen/linux/efi.h        Wed Aug 30 22:36:18 
2006 +0100
@@ -293,10 +293,6 @@ extern void efi_map_pal_code (void);
 extern void efi_map_pal_code (void);
 extern void efi_map_memmap(void);
 extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
-#ifdef XEN
-typedef int (*efi_walk_type_callback_t)(efi_memory_desc_t *md, void *arg);
-extern void efi_memmap_walk_type(u32 type, efi_walk_type_callback_t callback, 
void *arg);
-#endif
 extern void efi_gettimeofday (struct timespec *ts);
 extern void efi_enter_virtual_mode (void);     /* switch EFI to virtual mode, 
if possible */
 extern u64 efi_get_iobase (void);
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/multicall.h
--- a/xen/include/asm-ia64/multicall.h  Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/multicall.h  Wed Aug 30 22:36:18 2006 +0100
@@ -2,6 +2,7 @@
 #define __ASM_IA64_MULTICALL_H__
 
 #include <public/xen.h>
+#include <xen/errno.h>
 
 typedef unsigned long (*hypercall_t)(
                        unsigned long arg0,
@@ -11,17 +12,20 @@ typedef unsigned long (*hypercall_t)(
                        unsigned long arg4,
                        unsigned long arg5);
 
-extern hypercall_t ia64_hypercall_table[];
+extern const hypercall_t ia64_hypercall_table[];
 
 static inline void do_multicall_call(multicall_entry_t *call)
 {
-       call->result = (*ia64_hypercall_table[call->op])(
+       if (call->op < NR_hypercalls)
+               call->result = (*ia64_hypercall_table[call->op])(
                        call->args[0],
                        call->args[1],
                        call->args[2],
                        call->args[3],
                        call->args[4],
                        call->args[5]);
+       else
+               call->result = -ENOSYS;
 }
 
 #endif /* __ASM_IA64_MULTICALL_H__ */
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/perfc.h
--- a/xen/include/asm-ia64/perfc.h      Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/perfc.h      Wed Aug 30 22:36:18 2006 +0100
@@ -1,16 +1,22 @@
 #ifndef __ASM_PERFC_H__
 #define __ASM_PERFC_H__
 
-static inline void arch_perfc_printall (void)
+#include <asm/vhpt.h>
+#include <asm/privop_stat.h>
+
+static inline void arch_perfc_printall(void)
 {
 }
 
-static inline void arch_perfc_reset (void)
+static inline void arch_perfc_reset(void)
 {
+  reset_privop_addrs();
 }
 
-static inline void arch_perfc_gather (void)
+static inline void arch_perfc_gather(void)
 {
+  gather_vhpt_stats();
+  gather_privop_addrs();
 }
 
 #endif
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/perfc_defn.h
--- a/xen/include/asm-ia64/perfc_defn.h Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/perfc_defn.h Wed Aug 30 22:36:18 2006 +0100
@@ -40,3 +40,20 @@ PERFCOUNTER_ARRAY(fast_hyperprivop,   "f
 
 PERFCOUNTER_ARRAY(slow_reflect,       "slow reflection", 0x80)
 PERFCOUNTER_ARRAY(fast_reflect,       "fast reflection", 0x80)
+
+PERFSTATUS(vhpt_nbr_entries,          "nbr of entries per VHPT")
+PERFSTATUS_CPU(vhpt_valid_entries,    "nbr of valid entries in VHPT")
+
+#ifdef CONFIG_PRIVOP_ADDRS
+#ifndef PERFPRIVOPADDR
+#define PERFPRIVOPADDR(name) \
+PERFSTATUS_ARRAY(privop_addr_##name##_addr, "privop-addrs addr " #name, \
+                 PRIVOP_COUNT_NADDRS) \
+PERFSTATUS_ARRAY(privop_addr_##name##_count, "privop-addrs count " #name, \
+                 PRIVOP_COUNT_NADDRS) \
+PERFSTATUS(privop_addr_##name##_overflow, "privop-addrs overflow " #name)
+#endif
+
+PERFPRIVOPADDR(get_ifa)
+PERFPRIVOPADDR(thash)
+#endif
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/privop_stat.h
--- a/xen/include/asm-ia64/privop_stat.h        Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/privop_stat.h        Wed Aug 30 22:36:18 2006 +0100
@@ -1,23 +1,48 @@
 #ifndef _XEN_UA64_PRIVOP_STAT_H
 #define _XEN_UA64_PRIVOP_STAT_H
+#include <asm/config.h>
 #include <xen/types.h>
 #include <public/xen.h>
 
-extern int dump_privop_counts_to_user(char *, int);
-extern int zero_privop_counts_to_user(char *, int);
+#ifdef CONFIG_PRIVOP_ADDRS
 
-#define PRIVOP_ADDR_COUNT
+extern void gather_privop_addrs(void);
+extern void reset_privop_addrs(void);
 
-#ifdef PRIVOP_ADDR_COUNT
+#undef  PERFCOUNTER
+#define PERFCOUNTER(var, name)
 
-/* INST argument of PRIVOP_COUNT_ADDR.  */
-#define _GET_IFA 0
-#define _THASH 1
+#undef  PERFCOUNTER_CPU
+#define PERFCOUNTER_CPU(var, name)
+
+#undef  PERFCOUNTER_ARRAY
+#define PERFCOUNTER_ARRAY(var, name, size)
+
+#undef  PERFSTATUS
+#define PERFSTATUS(var, name)
+
+#undef  PERFSTATUS_CPU
+#define PERFSTATUS_CPU(var, name)
+
+#undef  PERFSTATUS_ARRAY
+#define PERFSTATUS_ARRAY(var, name, size)
+
+#undef  PERFPRIVOPADDR
+#define PERFPRIVOPADDR(name) privop_inst_##name,
+
+enum privop_inst {
+#include <asm/perfc_defn.h>
+};
+
+#undef PERFPRIVOPADDR
+
 #define        PRIVOP_COUNT_ADDR(regs,inst) 
privop_count_addr(regs->cr_iip,inst)
-extern void privop_count_addr(unsigned long addr, int inst);
+extern void privop_count_addr(unsigned long addr, enum privop_inst inst);
 
 #else
-#define        PRIVOP_COUNT_ADDR(x,y) do {} while (0)
+#define PRIVOP_COUNT_ADDR(x,y) do {} while (0)
+#define gather_privop_addrs() do {} while (0)
+#define reset_privop_addrs() do {} while (0)
 #endif
 
 #endif /* _XEN_UA64_PRIVOP_STAT_H */
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/uaccess.h
--- a/xen/include/asm-ia64/uaccess.h    Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/uaccess.h    Wed Aug 30 22:36:18 2006 +0100
@@ -211,16 +211,30 @@ extern unsigned long __must_check __copy
 extern unsigned long __must_check __copy_user (void __user *to, const void 
__user *from,
                                               unsigned long count);
 
+extern int ia64_map_hypercall_param(void);
+
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
-       return __copy_user(to, (void __user *) from, count);
+       unsigned long len;
+       len = __copy_user(to, (void __user *)from, count);
+       if (len == 0)
+               return 0;
+       if (ia64_map_hypercall_param())
+               len = __copy_user(to, (void __user *)from, count); /* retry */
+       return len;
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
-       return __copy_user((void __user *) to, from, count);
+       unsigned long len;
+       len = __copy_user((void __user *)to, from, count);
+       if (len == 0)
+               return 0;
+       if (ia64_map_hypercall_param())
+               len = __copy_user((void __user *) to, from, count); /* retry */
+       return len;
 }
 
 #define __copy_to_user_inatomic                __copy_to_user
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h       Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/vcpu.h       Wed Aug 30 22:36:18 2006 +0100
@@ -4,6 +4,7 @@
 // TODO: Many (or perhaps most) of these should eventually be
 // static inline functions
 
+#include <asm/delay.h>
 #include <asm/fpu.h>
 #include <asm/tlb.h>
 #include <asm/ia64_int.h>
@@ -15,6 +16,7 @@ struct vcpu;
 struct vcpu;
 typedef        struct vcpu VCPU;
 typedef cpu_user_regs_t REGS;
+extern u64 cycle_to_ns(u64 cycle);
 
 /* Note: PSCB stands for Privilegied State Communication Block.  */
 #define VCPU(_v,_x)    (_v->arch.privregs->_x)
@@ -183,6 +185,21 @@ itir_mask(UINT64 itir)
     return (~((1UL << itir_ps(itir)) - 1));
 }
 
+static inline s64
+vcpu_get_next_timer_ns(VCPU *vcpu)
+{
+    s64 vcpu_get_next_timer_ns;
+    u64 d = PSCBX(vcpu, domain_itm);
+    u64 now = ia64_get_itc();
+
+    if (d > now)
+        vcpu_get_next_timer_ns = cycle_to_ns(d - now) + NOW();
+    else
+        vcpu_get_next_timer_ns = cycle_to_ns(local_cpu_data->itm_delta) + 
NOW();
+
+    return vcpu_get_next_timer_ns;
+}
+
 #define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
 
 //#define vcpu_quick_region_check(_tr_regions,_ifa) 1
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h       Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/vhpt.h       Wed Aug 30 22:36:18 2006 +0100
@@ -32,7 +32,7 @@ struct vhpt_lf_entry {
 #define INVALID_TI_TAG 0x8000000000000000L
 
 extern void vhpt_init (void);
-extern int dump_vhpt_stats(char *buf);
+extern void gather_vhpt_stats(void);
 extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
                                 unsigned long logps);
 extern void vhpt_insert (unsigned long vadr, unsigned long pte,
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h        Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/vmx.h        Wed Aug 30 22:36:18 2006 +0100
@@ -35,7 +35,6 @@ extern void vmx_save_state(struct vcpu *
 extern void vmx_save_state(struct vcpu *v);
 extern void vmx_load_state(struct vcpu *v);
 extern void vmx_setup_platform(struct domain *d);
-extern void vmx_wait_io(void);
 extern void vmx_io_assist(struct vcpu *v);
 extern int ia64_hypercall (struct pt_regs *regs);
 extern void vmx_save_state(struct vcpu *v);
@@ -53,17 +52,14 @@ extern void vmx_intr_assist(struct vcpu 
 extern void vmx_intr_assist(struct vcpu *v);
 extern void set_illegal_op_isr (struct vcpu *vcpu);
 extern void illegal_op (struct vcpu *vcpu);
+extern void vmx_relinquish_guest_resources(struct domain *d);
 extern void vmx_relinquish_vcpu_resources(struct vcpu *v);
 extern void vmx_die_if_kernel(char *str, struct pt_regs *regs, long err);
+extern void vmx_send_assist_req(struct vcpu *v);
 
 static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
 {
     return &((shared_iopage_t 
*)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
-}
-
-static inline int iopacket_port(struct vcpu *v)
-{
-    return get_vio(v->domain, v->vcpu_id)->vp_eport;
 }
 
 static inline shared_iopage_t *get_sp(struct domain *d)
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Wed Aug 30 22:36:18 2006 +0100
@@ -239,12 +239,13 @@ vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
 {
     u64 mdcr, mask;
     VCPU(vcpu,dcr)=val;
-    /* All vDCR bits will go to mDCR, except for be/pp bit */
+    /* All vDCR bits will go to mDCR, except for be/pp/dm bits */
     mdcr = ia64_get_dcr();
-    mask = IA64_DCR_BE | IA64_DCR_PP;
+    /* Machine dcr.dm masked to handle guest ld.s on tr mapped page */
+    mask = IA64_DCR_BE | IA64_DCR_PP | IA64_DCR_DM;
     mdcr = ( mdcr & mask ) | ( val & (~mask) );
     ia64_set_dcr( mdcr);
-
+    VMX(vcpu, mdcr) = mdcr;
     return IA64_NO_FAULT;
 }
 
diff -r e01441c9a607 -r 50aea0ec406b xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h    Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/asm-ia64/vmx_vpd.h    Wed Aug 30 22:36:18 2006 +0100
@@ -27,6 +27,7 @@
 #include <asm/vtm.h>
 #include <asm/vmx_platform.h>
 #include <public/xen.h>
+#include <xen/spinlock.h>
 
 #define VPD_SHIFT      17      /* 128K requirement */
 #define VPD_SIZE       (1 << VPD_SHIFT)
@@ -72,6 +73,11 @@ struct ivt_debug{
 };
 #define IVT_DEBUG_MAX 128
 #endif
+
+struct arch_vmx_domain {
+    spinlock_t virq_assist_lock; /* spinlock for pass virq */
+};
+
 struct arch_vmx_struct {
 //     vpd_t       *vpd;
     vtime_t        vtm;
@@ -89,13 +95,15 @@ struct arch_vmx_struct {
 //    unsigned long   mrr5;
 //    unsigned long   mrr6;
 //    unsigned long   mrr7;
+    unsigned long   mdcr;
     unsigned long   mpta;
 //    unsigned long   rfi_pfs;
 //    unsigned long   rfi_iip;
 //    unsigned long   rfi_ipsr;
 //    unsigned long   rfi_ifs;
 //     unsigned long   in_service[4];  // vLsapic inservice IRQ bits
-       unsigned long   flags;
+    unsigned long   flags;
+    unsigned long   xen_port;
 #ifdef VTI_DEBUG
     unsigned long  ivt_current;
     struct ivt_debug ivt_debug[IVT_DEBUG_MAX];
diff -r e01441c9a607 -r 50aea0ec406b xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/public/domctl.h       Wed Aug 30 22:36:18 2006 +0100
@@ -16,7 +16,7 @@
 
 #include "xen.h"
 
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000001
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000002
 
 #define uint64_t uint64_aligned_t
 
@@ -72,8 +72,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdo
 #define XEN_DOMCTL_getmemlist         6
 struct xen_domctl_getmemlist {
     /* IN variables. */
+    /* Max entries to write to output buffer. */
     uint64_t max_pfns;
-    XEN_GUEST_HANDLE_64(ulong) buffer;
+    /* Start index in guest's page list. */
+    uint64_t start_pfn;
+    XEN_GUEST_HANDLE_64(xen_pfn_t) buffer;
     /* OUT variables. */
     uint64_t num_pfns;
 };
diff -r e01441c9a607 -r 50aea0ec406b xen/include/public/xen.h
--- a/xen/include/public/xen.h  Wed Aug 30 14:09:31 2006 -0500
+++ b/xen/include/public/xen.h  Wed Aug 30 22:36:18 2006 +0100
@@ -63,6 +63,7 @@
 #define __HYPERVISOR_hvm_op               34
 #define __HYPERVISOR_sysctl               35
 #define __HYPERVISOR_domctl               36
+#define __HYPERVISOR_kexec_op             37
 
 /* Architecture-specific hypercall definitions. */
 #define __HYPERVISOR_arch_0               48
diff -r e01441c9a607 -r 50aea0ec406b 
linux-2.6-xen-sparse/include/asm-ia64/maddr.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/include/asm-ia64/maddr.h     Wed Aug 30 22:36:18 
2006 +0100
@@ -0,0 +1,88 @@
+#ifndef _ASM_IA64_MADDR_H
+#define _ASM_IA64_MADDR_H
+
+#include <linux/kernel.h>
+#include <asm/hypervisor.h>
+#include <xen/features.h>
+#include <xen/interface/xen.h>
+
+#ifdef CONFIG_XEN
+
+#define INVALID_P2M_ENTRY       (~0UL)
+
+/* XXX xen page size != page size */
+static inline unsigned long
+pfn_to_mfn_for_dma(unsigned long pfn)
+{
+       unsigned long mfn;
+       mfn = HYPERVISOR_phystomach(pfn);
+       BUG_ON(mfn == 0); // XXX
+       BUG_ON(mfn == INVALID_P2M_ENTRY); // XXX
+       BUG_ON(mfn == INVALID_MFN);
+       return mfn;
+}
+
+static inline unsigned long
+phys_to_machine_for_dma(unsigned long phys)
+{
+       unsigned long machine =
+                     pfn_to_mfn_for_dma(phys >> PAGE_SHIFT) << PAGE_SHIFT;
+       machine |= (phys & ~PAGE_MASK);
+       return machine;
+}
+
+static inline unsigned long
+mfn_to_pfn_for_dma(unsigned long mfn)
+{
+       unsigned long pfn;
+       pfn = HYPERVISOR_machtophys(mfn);
+       BUG_ON(pfn == 0);
+       //BUG_ON(pfn == INVALID_M2P_ENTRY);
+       return pfn;
+}
+
+static inline unsigned long
+machine_to_phys_for_dma(unsigned long machine)
+{
+       unsigned long phys =
+                     mfn_to_pfn_for_dma(machine >> PAGE_SHIFT) << PAGE_SHIFT;
+       phys |= (machine & ~PAGE_MASK);
+       return phys;
+}
+
+static inline unsigned long
+mfn_to_local_pfn(unsigned long mfn)
+{
+       extern unsigned long max_mapnr;
+       unsigned long pfn = mfn_to_pfn_for_dma(mfn);
+       if (!pfn_valid(pfn))
+               return INVALID_P2M_ENTRY;
+       return pfn;
+}
+
+#else /* !CONFIG_XEN */
+
+#define pfn_to_mfn_for_dma(pfn) (pfn)
+#define mfn_to_pfn_for_dma(mfn) (mfn)
+#define phys_to_machine_for_dma(phys) (phys)
+#define machine_to_phys_for_dma(machine) (machine)
+#define mfn_to_local_pfn(mfn) (mfn)
+
+#endif /* !CONFIG_XEN */
+
+/* XXX to compile set_phys_to_machine(vaddr, FOREIGN_FRAME(m)) */
+#define FOREIGN_FRAME(m)        (INVALID_P2M_ENTRY)
+
+#define mfn_to_pfn(mfn) (mfn)
+#define pfn_to_mfn(pfn) (pfn)
+
+#define mfn_to_virt(mfn) (__va((mfn) << PAGE_SHIFT))
+#define virt_to_mfn(virt) (__pa(virt) >> PAGE_SHIFT)
+#define virt_to_machine(virt) __pa(virt) // for tpmfront.c
+
+#define set_phys_to_machine(pfn, mfn) do { } while (0)
+#define xen_machphys_update(mfn, pfn) do { } while (0)
+
+typedef unsigned long maddr_t; // to compile netback, netfront
+
+#endif /* _ASM_IA64_MADDR_H */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.