
[Xen-changelog] merge



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID eeac4fdf02ed4c2aec9793517ea441fe538ee326
# Parent  7edd64c8bb36c9d0c30f3e7a5a79f75501f2e280
# Parent  385ddb11971dc36eea9d07ebcd1635c8153cc86d
merge

diff -r 7edd64c8bb36 -r eeac4fdf02ed linux-2.6-xen-sparse/arch/ia64/Kconfig
--- a/linux-2.6-xen-sparse/arch/ia64/Kconfig    Thu Mar  2 09:59:34 2006
+++ b/linux-2.6-xen-sparse/arch/ia64/Kconfig    Thu Mar  2 10:00:49 2006
@@ -75,6 +75,11 @@
        default y
 
 config XEN_BLKDEV_FRONTEND
+       depends on XEN
+       bool
+       default y
+
+config XEN_BLKDEV_BACKEND
        depends on XEN
        bool
        default y
@@ -495,6 +500,7 @@
 
 config KPROBES
        bool "Kprobes (EXPERIMENTAL)"
+       depends on EXPERIMENTAL && MODULES
        help
          Kprobes allows you to trap at almost any kernel address and
          execute a callback function.  register_kprobe() establishes
diff -r 7edd64c8bb36 -r eeac4fdf02ed linux-2.6-xen-sparse/arch/ia64/kernel/head.S
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/head.S      Thu Mar  2 09:59:34 2006
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/head.S      Thu Mar  2 10:00:49 2006
@@ -363,6 +363,12 @@
        ;;
(isBP) st8 [r2]=r28            // save the address of the boot param area passed by the bootloader
 
+#ifdef CONFIG_XEN
+       //  Note: isBP is used by the subprogram.
+       br.call.sptk.many rp=early_xen_setup
+       ;;
+#endif
+
 #ifdef CONFIG_SMP
 (isAP) br.call.sptk.many rp=start_secondary
 .ret0:
@@ -371,10 +377,6 @@
 
        // This is executed by the bootstrap processor (bsp) only:
 
-#ifdef CONFIG_XEN
-       br.call.sptk.many rp=early_xen_setup
-       ;;
-#endif
 #ifdef CONFIG_IA64_FW_EMU
        // initialize PAL & SAL emulator:
        br.call.sptk.many rp=sys_fw_init
diff -r 7edd64c8bb36 -r eeac4fdf02ed linux-2.6-xen-sparse/arch/ia64/kernel/sal.c
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/sal.c       Thu Mar  2 09:59:34 2006
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/sal.c       Thu Mar  2 10:00:49 2006
@@ -336,8 +336,10 @@
                p += SAL_DESC_SIZE(*p);
        }
 
+#ifdef CONFIG_XEN
        if (!running_on_xen)
-               check_sal_cache_flush();
+#endif
+       check_sal_cache_flush();
 }
 
 int
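
The sal.c hunk above is easy to misread because the removed and added lines interleave with the new #ifdef. A minimal sketch of what the compiler sees after preprocessing, assuming running_on_xen only exists in CONFIG_XEN builds:

    /* CONFIG_XEN build: skip the SAL cache-flush probe when running on Xen. */
    if (!running_on_xen)
            check_sal_cache_flush();

    /* !CONFIG_XEN build: the guard compiles away and the probe always runs,
       matching bare-metal Linux. */
    check_sal_cache_flush();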
diff -r 7edd64c8bb36 -r eeac4fdf02ed linux-2.6-xen-sparse/arch/ia64/kernel/setup.c
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c     Thu Mar  2 09:59:34 2006
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c     Thu Mar  2 10:00:49 2006
@@ -61,6 +61,9 @@
 #include <asm/system.h>
 #include <asm/unistd.h>
 #include <asm/system.h>
+#ifdef CONFIG_XEN
+#include <asm/hypervisor.h>
+#endif
 
 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
 # error "struct cpuinfo_ia64 too big!"
@@ -243,6 +246,14 @@
        rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
        n++;
 
+#ifdef CONFIG_XEN
+       if (running_on_xen) {
+       rsvd_region[n].start = (unsigned long)__va((HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
+               rsvd_region[n].end   = rsvd_region[n].start + PAGE_SIZE;
+               n++;
+       }
+#endif
+
 #ifdef CONFIG_BLK_DEV_INITRD
        if (ia64_boot_param->initrd_start) {
                rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
@@ -260,6 +271,7 @@
        n++;
 
        num_rsvd_regions = n;
+       BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
 
        sort_regions(rsvd_region, num_rsvd_regions);
 }
diff -r 7edd64c8bb36 -r eeac4fdf02ed linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c
--- a/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c  Thu Mar  2 09:59:34 2006
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c  Thu Mar  2 10:00:49 2006
@@ -106,8 +106,10 @@
     BUG_ON(HYPERVISOR_event_channel_op(&op) != 0 );
     evtchn = op.u.bind_virq.port;
 
-    if (!unbound_irq(evtchn))
-       return -EINVAL;
+    if (!unbound_irq(evtchn)) {
+        evtchn = -EINVAL;
+        goto out;
+    }
 
     evtchns[evtchn].handler = handler;
     evtchns[evtchn].dev_id = dev_id;
@@ -115,6 +117,7 @@
     irq_info[evtchn] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
 
     unmask_evtchn(evtchn);
+out:
     spin_unlock(&irq_mapping_update_lock);
     return evtchn;
 }
@@ -125,8 +128,10 @@
 {
     spin_lock(&irq_mapping_update_lock);
 
-    if (!unbound_irq(evtchn))
-       return -EINVAL;
+    if (!unbound_irq(evtchn)) {
+       evtchn = -EINVAL;
+       goto out;
+    }
 
     evtchns[evtchn].handler = handler;
     evtchns[evtchn].dev_id = dev_id;
@@ -134,6 +139,7 @@
     irq_info[evtchn] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
 
     unmask_evtchn(evtchn);
+out:
     spin_unlock(&irq_mapping_update_lock);
     return evtchn;
 }
@@ -158,7 +164,7 @@
     spin_lock(&irq_mapping_update_lock);
 
     if (unbound_irq(irq))
-        return;
+        goto out;
 
     op.cmd = EVTCHNOP_close;
     op.u.close.port = evtchn;
@@ -179,6 +185,7 @@
     evtchns[evtchn].handler = NULL;
     evtchns[evtchn].opened = 0;
 
+out:
     spin_unlock(&irq_mapping_update_lock);
 }
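
All three hunks in this file fix the same defect: the early return paths gave up without releasing irq_mapping_update_lock. A condensed sketch of the lock/goto-out shape the code now follows (bind_example is a hypothetical name, not the literal driver code):

    static int bind_example(int evtchn)
    {
        spin_lock(&irq_mapping_update_lock);

        if (!unbound_irq(evtchn)) {
            evtchn = -EINVAL;   /* reuse the return variable for the error code */
            goto out;           /* ...so the unlock below still runs */
        }

        /* ... install the handler, record irq_info, unmask_evtchn() ... */

    out:
        spin_unlock(&irq_mapping_update_lock);
        return evtchn;
    }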
 
diff -r 7edd64c8bb36 -r eeac4fdf02ed linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S     Thu Mar  2 09:59:34 2006
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S     Thu Mar  2 10:00:49 2006
@@ -14,20 +14,22 @@
 running_on_xen:
        data4 0
 
+#define isBP   p3      // are we the Bootstrap Processor?
+
        .text
 GLOBAL_ENTRY(early_xen_setup)
-       mov r8=cr.dcr;;
-       extr.u r8=r8,63,1
-       movl r9=running_on_xen;;
-       st4 [r9]=r8;;
+       mov r8=cr.dcr
+(isBP) movl r9=running_on_xen;;
+       extr.u r8=r8,63,1;;
        cmp.ne p7,p0=r8,r0;;
+(isBP) st4 [r9]=r8
 (p7)   movl r10=xen_ivt;;
 (p7)   mov cr.iva=r10
        br.ret.sptk.many rp;;
-END(xen_init)
+END(early_xen_setup)
 
 GLOBAL_ENTRY(is_running_on_xen)
        movl r9=running_on_xen;;
-       ld4 r8=[r9];;
+       ld4 r8=[r9]
        br.ret.sptk.many rp;;
 END(is_running_on_xen)
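
For readers who do not speak IA-64 assembly, the reworked early_xen_setup behaves roughly like the C sketch below. The assumptions are worth labeling: the code treats dcr bit 63 as the running-on-Xen indicator, the isBP predicate (p3) is set by the caller only on the bootstrap processor, and read_dcr()/set_iva() are hypothetical stand-ins for the mov-from-cr.dcr and mov-to-cr.iva instructions:

    void early_xen_setup(void)
    {
            unsigned long on_xen = (read_dcr() >> 63) & 1;  /* extr.u r8=r8,63,1 */

            if (isBP)                   /* only the boot CPU stores the flag */
                    running_on_xen = on_xen;
            if (on_xen)                 /* every CPU switches to Xen's IVT */
                    set_iva(xen_ivt);
    }

The hunk also corrects the END() directive so the label matches the entry point's real name.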
diff -r 7edd64c8bb36 -r eeac4fdf02ed linux-2.6-xen-sparse/include/asm-ia64/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h Thu Mar  2 09:59:34 2006
+++ b/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h Thu Mar  2 10:00:49 2006
@@ -37,8 +37,6 @@
 #include <asm/page.h>
 #define virt_to_machine(v) __pa(v)
 #define machine_to_virt(m) __va(m)
-//#define virt_to_mfn(v)       (__pa(v) >> 14)
-//#define mfn_to_virt(m)       (__va(m << 14))
 #define virt_to_mfn(v) ((__pa(v)) >> PAGE_SHIFT)
 #define mfn_to_virt(m) (__va((m) << PAGE_SHIFT))
 
@@ -46,455 +44,210 @@
  * Assembler stubs for hyper-calls.
  */
 
-#if 0
-static inline int
-HYPERVISOR_set_trap_table(
-    trap_info_t *table)
-{
-#if 0
-    int ret;
-    unsigned long ignore;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ignore)
-       : "0" (__HYPERVISOR_set_trap_table), "1" (table)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-
-static inline int
-HYPERVISOR_mmu_update(
-    mmu_update_t *req, int count, int *success_count, domid_t domid)
-{
-#if 0
-    int ret;
-    unsigned long ign1, ign2, ign3, ign4;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
-       : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
-        "3" (success_count), "4" (domid)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-
-static inline int
-HYPERVISOR_mmuext_op(
-    struct mmuext_op *op, int count, int *success_count, domid_t domid)
-{
-#if 0
-    int ret;
-    unsigned long ign1, ign2, ign3, ign4;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
-       : "0" (__HYPERVISOR_mmuext_op), "1" (op), "2" (count),
-        "3" (success_count), "4" (domid)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-
-static inline int
-HYPERVISOR_set_gdt(
-    unsigned long *frame_list, int entries)
-{
-#if 0
-    int ret;
-    unsigned long ign1, ign2;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-       : "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
-       : "memory" );
-
-
-    return ret;
-#endif
-    return 1;
-}
-
-static inline int
-HYPERVISOR_stack_switch(
-    unsigned long ss, unsigned long esp)
-{
-#if 0
-    int ret;
-    unsigned long ign1, ign2;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-       : "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-
-static inline int
-HYPERVISOR_set_callbacks(
-    unsigned long event_selector, unsigned long event_address,
-    unsigned long failsafe_selector, unsigned long failsafe_address)
-{
-#if 0
-    int ret;
-    unsigned long ign1, ign2, ign3, ign4;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
-       : "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
-         "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-
-static inline int
-HYPERVISOR_fpu_taskswitch(
-    int set)
-{
-#if 0
-    int ret;
-    unsigned long ign;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign)
-        : "0" (__HYPERVISOR_fpu_taskswitch), "1" (set)
-        : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
+#define _hypercall0(type, name)                                        \
+({                                                             \
+       long __res;                                             \
+       __asm__ __volatile__ (";;\n"                            \
+                             "mov r2=%1\n"                     \
+                             "break 0x1000 ;;\n"               \
+                             "mov %0=r8 ;;\n"                  \
+                             : "=r" (__res)                    \
+                             : "i" (__HYPERVISOR_##name)       \
+                             : "r2","r8",                      \
+                               "memory" );                     \
+       (type)__res;                                            \
+})
+
+#define _hypercall1(type, name, a1)                            \
+({                                                             \
+       long __res;                                             \
+       __asm__ __volatile__ (";;\n"                            \
+                             "mov r14=%2\n"                    \
+                             "mov r2=%1\n"                     \
+                             "break 0x1000 ;;\n"               \
+                             "mov %0=r8 ;;\n"                  \
+                             : "=r" (__res)                    \
+                             : "i" (__HYPERVISOR_##name),      \
+                               "r" ((unsigned long)(a1))       \
+                             : "r14","r2","r8",                \
+                               "memory" );                     \
+       (type)__res;                                            \
+})
+
+#define _hypercall2(type, name, a1, a2)                                \
+({                                                             \
+       long __res;                                             \
+       __asm__ __volatile__ (";;\n"                            \
+                             "mov r14=%2\n"                    \
+                             "mov r15=%3\n"                    \
+                             "mov r2=%1\n"                     \
+                             "break 0x1000 ;;\n"               \
+                             "mov %0=r8 ;;\n"                  \
+                             : "=r" (__res)                    \
+                             : "i" (__HYPERVISOR_##name),      \
+                               "r" ((unsigned long)(a1)),      \
+                               "r" ((unsigned long)(a2))       \
+                             : "r14","r15","r2","r8",          \
+                               "memory" );                     \
+       (type)__res;                                            \
+})
+
+#define _hypercall3(type, name, a1, a2, a3)                    \
+({                                                             \
+       long __res;                                             \
+       __asm__ __volatile__ (";;\n"                            \
+                             "mov r14=%2\n"                    \
+                             "mov r15=%3\n"                    \
+                             "mov r16=%4\n"                    \
+                             "mov r2=%1\n"                     \
+                             "break 0x1000 ;;\n"               \
+                             "mov %0=r8 ;;\n"                  \
+                             : "=r" (__res)                    \
+                             : "i" (__HYPERVISOR_##name),      \
+                               "r" ((unsigned long)(a1)),      \
+                               "r" ((unsigned long)(a2)),      \
+                               "r" ((unsigned long)(a3))       \
+                             : "r14","r15","r16","r2","r8",    \
+                               "memory" );                     \
+       (type)__res;                                            \
+})
+
+#define _hypercall4(type, name, a1, a2, a3, a4)                        \
+({                                                             \
+       long __res;                                             \
+       __asm__ __volatile__ (";;\n"                            \
+                             "mov r14=%2\n"                    \
+                             "mov r15=%3\n"                    \
+                             "mov r16=%4\n"                    \
+                             "mov r17=%5\n"                    \
+                             "mov r2=%1\n"                     \
+                             "break 0x1000 ;;\n"               \
+                             "mov %0=r8 ;;\n"                  \
+                             : "=r" (__res)                    \
+                             : "i" (__HYPERVISOR_##name),      \
+                               "r" ((unsigned long)(a1)),      \
+                               "r" ((unsigned long)(a2)),      \
+                               "r" ((unsigned long)(a3)),      \
+                               "r" ((unsigned long)(a4))       \
+                             : "r14","r15","r16","r2","r8",    \
+                               "r17","memory" );               \
+       (type)__res;                                            \
+})
+
+#define _hypercall5(type, name, a1, a2, a3, a4, a5)            \
+({                                                             \
+       long __res;                                             \
+       __asm__ __volatile__ (";;\n"                            \
+                             "mov r14=%2\n"                    \
+                             "mov r15=%3\n"                    \
+                             "mov r16=%4\n"                    \
+                             "mov r17=%5\n"                    \
+                             "mov r18=%6\n"                    \
+                             "mov r2=%1\n"                     \
+                             "break 0x1000 ;;\n"               \
+                             "mov %0=r8 ;;\n"                  \
+                             : "=r" (__res)                    \
+                             : "i" (__HYPERVISOR_##name),      \
+                               "r" ((unsigned long)(a1)),      \
+                               "r" ((unsigned long)(a2)),      \
+                               "r" ((unsigned long)(a3)),      \
+                               "r" ((unsigned long)(a4)),      \
+                               "r" ((unsigned long)(a5))       \
+                             : "r14","r15","r16","r2","r8",    \
+                               "r17","r18","memory" );         \
+       (type)__res;                                            \
+})
 
 static inline int
 HYPERVISOR_sched_op(
     int cmd, unsigned long arg)
 {
-    return 1;
-}
-
-static inline int
-HYPERVISOR_suspend(
-    unsigned long srec)
-{
-    return 1;
+       return _hypercall2(int, sched_op, cmd, arg);
 }
 
 static inline long
 HYPERVISOR_set_timer_op(
     u64 timeout)
 {
-#if 0
-    int ret;
     unsigned long timeout_hi = (unsigned long)(timeout>>32);
     unsigned long timeout_lo = (unsigned long)timeout;
-    unsigned long ign1, ign2;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-       : "0" (__HYPERVISOR_set_timer_op), "b" (timeout_lo), "c" (timeout_hi)
-       : "memory");
-
-    return ret;
-#endif
-    return 1;
+    return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
 }
 
 static inline int
 HYPERVISOR_dom0_op(
     dom0_op_t *dom0_op)
 {
-#if 0
-    int ret;
-    unsigned long ign1;
-
     dom0_op->interface_version = DOM0_INTERFACE_VERSION;
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1)
-       : "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
-       : "memory");
-
-    return ret;
-#endif
-    return 1;
-}
-
-static inline int
-HYPERVISOR_set_debugreg(
-    int reg, unsigned long value)
-{
-#if 0
-    int ret;
-    unsigned long ign1, ign2;
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-       : "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-
-static inline unsigned long
-HYPERVISOR_get_debugreg(
-    int reg)
-{
-#if 0
-    unsigned long ret;
-    unsigned long ign;
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign)
-       : "0" (__HYPERVISOR_get_debugreg), "1" (reg)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-
-static inline int
-HYPERVISOR_update_descriptor(
-    unsigned long ma, unsigned long word1, unsigned long word2)
-{
-#if 0
-    int ret;
-    unsigned long ign1, ign2, ign3;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
-       : "0" (__HYPERVISOR_update_descriptor), "1" (ma), "2" (word1),
-         "3" (word2)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-
-static inline int
-HYPERVISOR_set_fast_trap(
-    int idx)
-{
-#if 0
-    int ret;
-    unsigned long ign;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign)
-       : "0" (__HYPERVISOR_set_fast_trap), "1" (idx)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-
-static inline int
-HYPERVISOR_dom_mem_op(
-    unsigned int op, unsigned long *extent_list,
-    unsigned long nr_extents, unsigned int extent_order)
-{
-#if 0
-    int ret;
-    unsigned long ign1, ign2, ign3, ign4, ign5;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
-         "=D" (ign5)
-       : "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
-         "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
-        : "memory" );
-
-    return ret;
-#endif
-    return 1;
+    return _hypercall1(int, dom0_op, dom0_op);
 }
 
 static inline int
 HYPERVISOR_multicall(
     void *call_list, int nr_calls)
 {
-#if 0
-    int ret;
-    unsigned long ign1, ign2;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-       : "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-#endif
-
-static inline int
-HYPERVISOR_update_va_mapping(
-    unsigned long va, pte_t new_val, unsigned long flags)
-{
-    /* no-op */
-    return 1;
+    return _hypercall2(int, multicall, call_list, nr_calls);
 }
 
 static inline int
 HYPERVISOR_memory_op(
     unsigned int cmd, void *arg)
 {
-    int ret;
-    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
-        : "=r" (ret)
-        : "i" (__HYPERVISOR_memory_op), "r"(cmd), "r"(arg)
-        : "r14","r15","r2","r8","memory" );
-    return ret;
+    return _hypercall2(int, memory_op, cmd, arg);
 }
 
 static inline int
 HYPERVISOR_event_channel_op(
     void *op)
 {
-    int ret;
-    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
-        : "=r" (ret)
-        : "i" (__HYPERVISOR_event_channel_op), "r"(op)
-        : "r14","r2","r8","memory" );
-    return ret;
-}
-
-#if 0
+    return _hypercall1(int, event_channel_op, op);
+}
+
 static inline int
 HYPERVISOR_xen_version(
-    int cmd)
-{
-#if 0
-    int ret;
-    unsigned long ignore;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ignore)
-       : "0" (__HYPERVISOR_xen_version), "1" (cmd)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-#endif
+    int cmd, void *arg)
+{
+    return _hypercall2(int, xen_version, cmd, arg);
+}
 
 static inline int
 HYPERVISOR_console_io(
     int cmd, int count, char *str)
 {
-    int ret;
-    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r16=%4 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
-        : "=r" (ret)
-        : "i" (__HYPERVISOR_console_io), "r"(cmd), "r"(count), "r"(str)
-        : "r14","r15","r16","r2","r8","memory" );
-    return ret;
-}
-
-#if 0
+    return _hypercall3(int, console_io, cmd, count, str);
+}
+
 static inline int
 HYPERVISOR_physdev_op(
     void *physdev_op)
 {
-#if 0
-    int ret;
-    unsigned long ign;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign)
-       : "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-#endif
+    return _hypercall1(int, physdev_op, physdev_op);
+}
 
 static inline int
 HYPERVISOR_grant_table_op(
     unsigned int cmd, void *uop, unsigned int count)
 {
-    int ret;
-    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r16=%4 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
-        : "=r" (ret)
-        : "i" (__HYPERVISOR_grant_table_op), "r"(cmd), "r"(uop), "r"(count)
-        : "r14","r15","r16","r2","r8","memory" );
-    return ret;
-}
-
-#if 0
-static inline int
-HYPERVISOR_update_va_mapping_otherdomain(
-    unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
-{
-#if 0
-    int ret;
-    unsigned long ign1, ign2, ign3, ign4;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
-       : "0" (__HYPERVISOR_update_va_mapping_otherdomain),
-          "1" (va), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
-        "memory" );
-    
-    return ret;
-#endif
-    return 1;
-}
-
-static inline int
-HYPERVISOR_vm_assist(
-    unsigned int cmd, unsigned int type)
-{
-#if 0
-    int ret;
-    unsigned long ign1, ign2;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-       : "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
-       : "memory" );
-
-    return ret;
-#endif
-    return 1;
-}
-
-#endif
+    return _hypercall3(int, grant_table_op, cmd, uop, count);
+}
+
+static inline int
+HYPERVISOR_vcpu_op(
+       int cmd, int vcpuid, void *extra_args)
+{
+    return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
+}
+
+static inline int
+HYPERVISOR_suspend(
+       unsigned long srec)
+{
+    return _hypercall3(int, sched_op, SCHEDOP_shutdown,
+                       SHUTDOWN_suspend, srec);
+}
+
+extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
+static inline void exit_idle(void) {}
+#define do_IRQ(irq, regs) __do_IRQ((irq), (regs))
 
 #endif /* __HYPERCALL_H__ */
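
The _hypercallN macros above all encode one fixed convention: hypercall number in r2, up to five arguments in r14-r18, break 0x1000 to trap into Xen, and the status returned in r8. Callers see only ordinary C. As an illustrative sketch (xen_puts is hypothetical; HYPERVISOR_console_io and CONSOLEIO_write are the real wrapper and command defined by this header and Xen's public interface):

    static void xen_puts(const char *s, int len)
    {
            /* _hypercall3 loads r14=cmd, r15=count, r16=str,
             * sets r2=__HYPERVISOR_console_io, executes break 0x1000,
             * and returns (int)r8. */
            HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)s);
    }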
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/asm-xsi-offsets.c
--- a/xen/arch/ia64/asm-xsi-offsets.c   Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/asm-xsi-offsets.c   Thu Mar  2 10:00:49 2006
@@ -84,8 +84,6 @@
        DEFINE(XSI_PEND, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pending_interruption)));
        DEFINE(XSI_INCOMPL_REGFR_OFS, offsetof(mapped_regs_t, incomplete_regframe));
        DEFINE(XSI_INCOMPL_REGFR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, incomplete_regframe)));
-       DEFINE(XSI_DELIV_MASK0_OFS, offsetof(mapped_regs_t, delivery_mask[0]));
-       DEFINE(XSI_DELIV_MASK0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, delivery_mask[0])));
        DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
        DEFINE(XSI_METAPHYS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, metaphysical_mode)));
 
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/linux-xen/README.origin
--- a/xen/arch/ia64/linux-xen/README.origin     Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/linux-xen/README.origin     Thu Mar  2 10:00:49 2006
@@ -5,19 +5,24 @@
 # (e.g. with #ifdef XEN or XEN in a comment) so that they can be
 # easily updated to future versions of the corresponding Linux files.
 
-efi.c          -> linux/arch/ia64/kernel/efi.c
-entry.h                -> linux/arch/ia64/kernel/entry.h
-entry.S                -> linux/arch/ia64/kernel/entry.S
-hpsim_ssc.h    -> linux/arch/ia64/hp/sim/hpsim_ssc.h
-irq_ia64.c     -> linux/arch/ia64/kernel/irq_ia64.c
-minstate.h     -> linux/arch/ia64/kernel/minstate.h
-mm_contig.c    -> linux/arch/ia64/mm/contig.c
-pal.S          -> linux/arch/ia64/kernel/pal.S
-sal.c          -> linux/arch/ia64/kernel/sal.c
-setup.c                -> linux/arch/ia64/kernel/setup.c
-smp.c          -> linux/arch/ia64/kernel/smp.c
-smpboot.c      -> linux/arch/ia64/kernel/smpboot.c
-sort.c         -> linux/lib/sort.c
-time.c         -> linux/arch/ia64/kernel/time.c
-tlb.c          -> linux/arch/ia64/mm/tlb.c
-unaligned.c    -> linux/arch/ia64/kernel/unaligned.c
+efi.c                  -> linux/arch/ia64/kernel/efi.c
+entry.h                        -> linux/arch/ia64/kernel/entry.h
+entry.S                        -> linux/arch/ia64/kernel/entry.S
+head.S                 -> linux/arch/ia64/kernel/head.S
+hpsim_ssc.h            -> linux/arch/ia64/hp/sim/hpsim_ssc.h
+irq_ia64.c             -> linux/arch/ia64/kernel/irq_ia64.c
+minstate.h             -> linux/arch/ia64/kernel/minstate.h
+mm_contig.c            -> linux/arch/ia64/mm/contig.c
+pal.S                  -> linux/arch/ia64/kernel/pal.S
+process-linux-xen.c    -> linux/arch/ia64/kernel/process.c
+sal.c                  -> linux/arch/ia64/kernel/sal.c
+setup.c                        -> linux/arch/ia64/kernel/setup.c
+smp.c                  -> linux/arch/ia64/kernel/smp.c
+smpboot.c              -> linux/arch/ia64/kernel/smpboot.c
+sort.c                 -> linux/lib/sort.c
+time.c                 -> linux/arch/ia64/kernel/time.c
+tlb.c                  -> linux/arch/ia64/mm/tlb.c
+unaligned.c            -> linux/arch/ia64/kernel/unaligned.c
+unwind.c               -> linux/arch/ia64/kernel/unwind.c
+unwind_decoder.c       -> linux/arch/ia64/kernel/unwind_decoder.c
+unwind_i.h             -> linux/arch/ia64/kernel/unwind_i.h
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/linux-xen/efi.c
--- a/xen/arch/ia64/linux-xen/efi.c     Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/linux-xen/efi.c     Thu Mar  2 10:00:49 2006
@@ -534,32 +534,9 @@
 {
 #ifdef XEN
        u64 psr;
-       static unsigned long last_rr7 = 0;
-       unsigned long current_rr7 = ia64_get_rr(7L<<61);
-
-       // this routine is called only once in Linux but may be called
-       // multiple times in Xen.  However, we only need to flush and
-       // reset itr[IA64_TR_PALCODE] if rr7 changes
        if (!pal_vaddr) {
                pal_vaddr = efi_get_pal_addr ();
-               last_rr7 = current_rr7;
-       }
-       else if (last_rr7 == current_rr7) return;
-       else {
-               last_rr7 = current_rr7;
-               printk("efi_map_pal_code,remapping pal w/rr7=%lx\n",last_rr7);
-       }
-
-       printf("efi_map_pal_code: about to ia64_ptr(%d,%p,%p)\n",
-               0x1, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
-                IA64_GRANULE_SHIFT);
-       ia64_ptr(0x1, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
-                IA64_GRANULE_SHIFT);
-       ia64_srlz_i();
-       printf("efi_map_pal_code: about to ia64_itr(%p,%p,%p,%p,%p)\n",
-               0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
-                pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
-                IA64_GRANULE_SHIFT);
+       }
 #else
        void *pal_vaddr = efi_get_pal_addr ();
        u64 psr;
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/linux-xen/irq_ia64.c
--- a/xen/arch/ia64/linux-xen/irq_ia64.c        Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/linux-xen/irq_ia64.c        Thu Mar  2 10:00:49 2006
@@ -169,7 +169,7 @@
         * handler needs to be able to wait for further keyboard interrupts, which can't
         * come through until ia64_eoi() has been done.
         */
-       xen_irq_exit(regs);
+       irq_exit();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/linux-xen/minstate.h
--- a/xen/arch/ia64/linux-xen/minstate.h        Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/linux-xen/minstate.h        Thu Mar  2 10:00:49 2006
@@ -175,7 +175,7 @@
        ;;                                                                              \
 .mem.offset 0,0; st8.spill [r16]=r13,16;                                               \
 .mem.offset 8,0; st8.spill [r17]=r21,16;       /* save ar.fpsr */                      \
-       /* XEN mov r13=IA64_KR(CURRENT);        /* establish `current' */               \
+       /* XEN mov r13=IA64_KR(CURRENT);*/      /* establish `current' */               \
        MINSTATE_GET_CURRENT(r13);              /* XEN establish `current' */           \
        ;;                                                                              \
 .mem.offset 0,0; st8.spill [r16]=r15,16;                                               \
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/linux-xen/setup.c
--- a/xen/arch/ia64/linux-xen/setup.c   Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/linux-xen/setup.c   Thu Mar  2 10:00:49 2006
@@ -94,6 +94,10 @@
 EXPORT_SYMBOL(io_space);
 unsigned int num_io_spaces;
 
+#ifdef XEN
+extern void early_cmdline_parse(char **);
+#endif
+
 /*
  * "flush_icache_range()" needs to know what processor dependent stride size 
to use
  * when it makes i-cache(s) coherent with d-caches.
@@ -500,6 +504,7 @@
        paging_init();
 }
 
+#ifndef XEN
 /*
  * Display cpu info for all cpu's.
  */
@@ -611,14 +616,13 @@
 {
 }
 
-#ifndef XEN
 struct seq_operations cpuinfo_op = {
        .start =        c_start,
        .next =         c_next,
        .stop =         c_stop,
        .show =         show_cpuinfo
 };
-#endif
+#endif /* XEN */
 
 void
 identify_cpu (struct cpuinfo_ia64 *c)
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/linux-xen/unaligned.c
--- a/xen/arch/ia64/linux-xen/unaligned.c       Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/linux-xen/unaligned.c       Thu Mar  2 10:00:49 2006
@@ -216,6 +216,7 @@
        RPT(r28), RPT(r29), RPT(r30), RPT(r31)
 };
 
+#ifndef XEN
 static u16 fr_info[32]={
        0,                      /* constant : WE SHOULD NEVER GET THIS */
        0,                      /* constant : WE SHOULD NEVER GET THIS */
@@ -285,6 +286,7 @@
        }
 #      undef F
 }
+#endif /* XEN */
 
 static inline unsigned long
 rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
@@ -299,12 +301,11 @@
 void
 set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
 {
-       struct switch_stack *sw = (struct switch_stack *) regs - 1;
-       unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
+       unsigned long *bsp, *bspstore, *addr, *rnat_addr;
        unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
-       unsigned long rnats, nat_mask;
+       unsigned long nat_mask;
     unsigned long old_rsc,new_rsc;
-       unsigned long on_kbs,rnat;
+       unsigned long rnat;
        long sof = (regs->cr_ifs) & 0x7f;
        long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
        long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
@@ -323,7 +324,7 @@
     new_rsc=old_rsc&(~0x3);
     ia64_set_rsc(new_rsc);
 
-    bspstore = ia64_get_bspstore();
+    bspstore = (unsigned long*)ia64_get_bspstore();
     bsp =kbs + (regs->loadrs >> 19);//16+3
 
        addr = ia64_rse_skip_regs(bsp, -sof + ridx);
@@ -335,7 +336,7 @@
         ia64_flushrs ();
         ia64_mf ();
                *addr = val;
-        bspstore = ia64_get_bspstore();
+        bspstore = (unsigned long*)ia64_get_bspstore();
        rnat = ia64_get_rnat ();
         if(bspstore < rnat_addr){
             rnat=rnat&(~nat_mask);
@@ -362,13 +363,11 @@
 
 
 static void
-get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
-{
-    struct switch_stack *sw = (struct switch_stack *) regs - 1;
-    unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
+get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int*nat)
+{
+    unsigned long *bsp, *addr, *rnat_addr, *bspstore;
     unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
-    unsigned long rnats, nat_mask;
-    unsigned long on_kbs;
+    unsigned long nat_mask;
     unsigned long old_rsc, new_rsc;
     long sof = (regs->cr_ifs) & 0x7f;
     long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
@@ -388,7 +387,7 @@
     new_rsc=old_rsc&(~(0x3));
     ia64_set_rsc(new_rsc);
 
-    bspstore = ia64_get_bspstore();
+    bspstore = (unsigned long*)ia64_get_bspstore();
     bsp =kbs + (regs->loadrs >> 19); //16+3;
 
     addr = ia64_rse_skip_regs(bsp, -sof + ridx);
@@ -399,14 +398,14 @@
 
         ia64_flushrs ();
         ia64_mf ();
-        bspstore = ia64_get_bspstore();
+        bspstore = (unsigned long*)ia64_get_bspstore();
     }
     *val=*addr;
     if(nat){
         if(bspstore < rnat_addr){
-            *nat=!!(ia64_get_rnat()&nat_mask);
+            *nat=(int)!!(ia64_get_rnat()&nat_mask);
         }else{
-            *nat = !!((*rnat_addr)&nat_mask);
+            *nat = (int)!!((*rnat_addr)&nat_mask);
         }
         ia64_set_rsc(old_rsc);
     }
@@ -634,6 +633,7 @@
        return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
 }
 
+#ifndef XEN
 static void
 setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
 {
@@ -682,6 +682,7 @@
                regs->cr_ipsr |= IA64_PSR_MFL;
        }
 }
+#endif /* XEN */
 
 /*
  * Those 2 inline functions generate the spilled versions of the constant floating point
@@ -699,6 +700,7 @@
        ia64_stf_spill(final, 1);
 }
 
+#ifndef XEN
 static void
 getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
 {
@@ -748,6 +750,7 @@
                }
        }
 }
+#endif /* XEN */
 
 
 #ifdef XEN
@@ -803,6 +806,7 @@
                *nat  = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL;
 }
 
+#ifndef XEN
 static void
 emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsigned long ifa)
 {
@@ -1078,6 +1082,7 @@
 
        return 0;
 }
+#endif /* XEN */
 
 /*
  * floating point operations sizes in bytes
@@ -1153,6 +1158,7 @@
        ia64_stfd(final, 6);
 }
 
+#ifndef XEN
 static int
 emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 {
@@ -1437,6 +1443,7 @@
        return 0;
 
 }
+#endif /* XEN */
 
 void
 ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/linux-xen/unwind.c
--- a/xen/arch/ia64/linux-xen/unwind.c  Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/linux-xen/unwind.c  Thu Mar  2 10:00:49 2006
@@ -484,7 +484,8 @@
        } else if (regnum <= 15) {
                if (regnum <= 11) {
                        pt = get_scratch_regs(info);
-                       addr = &pt->f6  + (regnum - 6);
+                       //XXX struct ia64_fpreg and struct pt_fpreg are same.
+                       addr = (struct ia64_fpreg*)(&pt->f6  + (regnum - 6));
                }
                else
                        addr = &info->sw->f12 + (regnum - 12);
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/linux/README.origin
--- a/xen/arch/ia64/linux/README.origin Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/linux/README.origin Thu Mar  2 10:00:49 2006
@@ -5,14 +5,15 @@
 the instructions in the README there.
 
 cmdline.c              -> linux/lib/cmdline.c
-efi_stub.S             -> linux/arch/ia64/efi_stub.S
+efi_stub.S             -> linux/arch/ia64/kernel/efi_stub.S
 extable.c              -> linux/arch/ia64/mm/extable.c
 hpsim.S                        -> linux/arch/ia64/hp/sim/hpsim.S
 ia64_ksyms.c           -> linux/arch/ia64/kernel/ia64_ksyms.c
+irq_lsapic.c           -> linux/arch/ia64/kernel/irq_lsapic.c
 linuxextable.c         -> linux/kernel/extable.c
 machvec.c              -> linux/arch/ia64/kernel/machvec.c
 patch.c                        -> linux/arch/ia64/kernel/patch.c
-pcdp.h                 -> drivers/firmware/pcdp.h
+pcdp.h                 -> linux/drivers/firmware/pcdp.h
 
 bitop.c                        -> linux/arch/ia64/lib/bitop.c
 clear_page.S           -> linux/arch/ia64/lib/clear_page.S
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/linux/cmdline.c
--- a/xen/arch/ia64/linux/cmdline.c     Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/linux/cmdline.c     Thu Mar  2 10:00:49 2006
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <xen/lib.h>
 
 
 /**
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/linux/linuxextable.c
--- a/xen/arch/ia64/linux/linuxextable.c        Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/linux/linuxextable.c        Thu Mar  2 10:00:49 2006
@@ -19,6 +19,10 @@
 #include <linux/init.h>
 #include <asm/uaccess.h>
 #include <asm/sections.h>
+
+extern void *search_module_extables(unsigned long addr);
+extern void *__module_text_address(unsigned long addr);
+extern void *module_text_address(unsigned long addr);
 
 extern struct exception_table_entry __start___ex_table[];
 extern struct exception_table_entry __stop___ex_table[];
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/mm.c
--- a/xen/arch/ia64/vmx/mm.c    Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/mm.c    Thu Mar  2 10:00:49 2006
@@ -106,17 +106,18 @@
     u64 mfn, gpfn;
     VCPU *vcpu;
     mmu_update_t req;
-    ia64_rr rr;
+    /* ia64_rr rr; */
     thash_cb_t *hcb;
-    thash_data_t entry={0},*ovl;
+    /* thash_data_t entry={0},*ovl; */
     vcpu = current;
-    search_section_t sections;
+    /* search_section_t sections; */
     hcb = vmx_vcpu_get_vtlb(vcpu);
     for ( i = 0; i < count; i++ )
     {
         copy_from_user(&req, ureqs, sizeof(req));
         cmd = req.ptr&3;
         req.ptr &= ~3;
+/*
         if(cmd ==MMU_NORMAL_PT_UPDATE){
             entry.page_flags = req.val;
             entry.locked = 1;
@@ -133,10 +134,12 @@
             if (ovl) {
                   // generate MCA.
                 panic("Tlb conflict!!");
-                return;
+                return -1;
             }
-            thash_purge_and_insert(hcb, &entry);
-        }else if(cmd == MMU_MACHPHYS_UPDATE){
+            thash_purge_and_insert(hcb, &entry, req.ptr);
+        }else
+ */
+        if(cmd == MMU_MACHPHYS_UPDATE){
             mfn = req.ptr >>PAGE_SHIFT;
             gpfn = req.val;
             set_machinetophys(mfn,gpfn);
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c  Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/mmio.c  Thu Mar  2 10:00:49 2006
@@ -32,6 +32,7 @@
 #include <public/hvm/ioreq.h>
 #include <asm/mm.h>
 #include <asm/vmx.h>
+#include <public/event_channel.h>
 
 /*
 struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
@@ -135,7 +136,6 @@
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
     ioreq_t *p;
-    unsigned long addr;
 
     vio = get_vio(v->domain, v->vcpu_id);
     if (vio == 0) {
@@ -168,7 +168,6 @@
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
     ioreq_t *p;
-    unsigned long addr;
 
     vio = get_vio(v->domain, v->vcpu_id);
     if (vio == 0) {
@@ -406,7 +405,7 @@
 {
     REGS *regs;
     IA64_BUNDLE bundle;
-    int slot, dir, inst_type;
+    int slot, dir=0, inst_type;
     size_t size;
     u64 data, value,post_update, slot1a, slot1b, temp;
     INST64 inst;
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/pal_emul.c
--- a/xen/arch/ia64/vmx/pal_emul.c      Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/pal_emul.c      Thu Mar  2 10:00:49 2006
@@ -19,6 +19,7 @@
  */
 
 #include <asm/vmx_vcpu.h>
+#include <asm/pal.h>
 
 static void
 get_pal_parameters (VCPU *vcpu, UINT64 *gr29,
@@ -180,10 +181,18 @@
 
 static struct ia64_pal_retval
 pal_freq_base(VCPU *vcpu){
+    struct ia64_pal_retval result;
+
+    PAL_CALL(result,PAL_FREQ_BASE, 0, 0, 0);
+    return result;
 }
 
 static struct ia64_pal_retval
 pal_freq_ratios(VCPU *vcpu){
+    struct ia64_pal_retval result;
+
+    PAL_CALL(result,PAL_FREQ_RATIOS, 0, 0, 0);
+    return result;
 }
 
 static struct ia64_pal_retval
@@ -229,7 +238,6 @@
 static struct ia64_pal_retval
 pal_vm_page_size(VCPU *vcpu){
 }
-
 void
 pal_emul( VCPU *vcpu) {
        UINT64 gr28;
@@ -266,9 +274,17 @@
                case PAL_CACHE_WRITE:
                        result = pal_cache_write (vcpu);
                        break;
-                       
+
                case PAL_PLATFORM_ADDR:
                        result = pal_platform_addr (vcpu);
+                       break;
+
+               case PAL_FREQ_RATIOS:
+                       result = pal_freq_ratios (vcpu);
+                       break;
+
+               case PAL_FREQ_BASE:
+                       result = pal_freq_base (vcpu);
                        break;
 
                default:
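
The two new handlers above show the passthrough pattern this file uses for read-only firmware services: fill a struct ia64_pal_retval via PAL_CALL and return it, then add a matching case to the dispatch switch in pal_emul(). A hedged sketch of forwarding one more service the same way (PAL_PROC_GET_FEATURES is a real PAL function index, but this handler is hypothetical and not part of the patch):

    static struct ia64_pal_retval
    pal_proc_get_features(VCPU *vcpu)
    {
        struct ia64_pal_retval result;

        PAL_CALL(result, PAL_PROC_GET_FEATURES, 0, 0, 0);
        return result;
    }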
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c       Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vlsapic.c       Thu Mar  2 10:00:49 2006
@@ -47,6 +47,9 @@
 /*
  * Update the checked last_itc.
  */
+
+extern void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
+     UINT64 vector,REGS *regs);
 static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
 {
     vtm->last_itc = cur_itc;
@@ -483,7 +486,7 @@
 
     if (vector & ~0xff) {
         DPRINTK("vmx_vcpu_pend_interrupt: bad vector\n");
-        return;
+        return -1;
     }
     local_irq_save(spsr);
     ret = test_and_set_bit(vector, &VCPU(vcpu, irr[0]));
@@ -572,12 +575,13 @@
     VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
     local_irq_restore(spsr);
     VCPU(vcpu, eoi)=0;    // overwrite the data
-    vmx_check_pending_irq(vcpu);
+    vcpu->arch.irq_new_pending=1;
+//    vmx_check_pending_irq(vcpu);
 }
 
 uint64_t guest_read_vivr(VCPU *vcpu)
 {
-    int vec, next, h_inservice;
+    int vec, h_inservice;
     uint64_t  spsr;
 
     local_irq_save(spsr);
@@ -609,7 +613,7 @@
     vmx_reflect_interruption(0,isr,0, 12, regs); // EXT IRQ
 }
 
-vhpi_detection(VCPU *vcpu)
+void vhpi_detection(VCPU *vcpu)
 {
     uint64_t    threshold,vhpi;
     tpr_t       vtpr;
@@ -626,7 +630,7 @@
     }
 }
 
-vmx_vexirq(VCPU *vcpu)
+void vmx_vexirq(VCPU *vcpu)
 {
     static  uint64_t  vexirq_count=0;
 
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vmmu.c  Thu Mar  2 10:00:49 2006
@@ -31,39 +31,26 @@
 #include <asm/hw_irq.h>
 #include <asm/vmx_pal_vsa.h>
 #include <asm/kregs.h>
-
-/*
- * Architecture ppn is in 4KB unit while XEN
- * page may be different(1<<PAGE_SHIFT).
- */
-static inline u64 arch_ppn_to_xen_ppn(u64 appn)
-{
-    return (appn << ARCH_PAGE_SHIFT) >> PAGE_SHIFT;
-}
-
-static inline u64 xen_ppn_to_arch_ppn(u64 xppn)
-{
-    return (xppn << PAGE_SHIFT) >> ARCH_PAGE_SHIFT;
-}
-
+#include <xen/irq.h>
 
 /*
  * Get the machine page frame number in 16KB unit
  * Input:
  *  d: 
  */
-u64 get_mfn(domid_t domid, u64 gpfn, u64 pages)
-{
-    struct domain *d;
-    u64    i, xen_gppn, xen_mppn, mpfn;
-    
+u64 get_mfn(struct domain *d, u64 gpfn)
+{
+//    struct domain *d;
+    u64    xen_gppn, xen_mppn, mpfn;
+/*
     if ( domid == DOMID_SELF ) {
         d = current->domain;
     }
     else {
         d = find_domain_by_id(domid);
     }
-    xen_gppn = arch_ppn_to_xen_ppn(gpfn);
+ */
+    xen_gppn = arch_to_xen_ppn(gpfn);
     xen_mppn = gmfn_to_mfn(d, xen_gppn);
 /*
     for (i=0; i<pages; i++) {
@@ -72,8 +59,8 @@
         }
     }
 */
-    mpfn= xen_ppn_to_arch_ppn(xen_mppn);
-    mpfn = mpfn | (((1UL <<(PAGE_SHIFT-12))-1)&gpfn);
+    mpfn= xen_to_arch_ppn(xen_mppn);
+    mpfn = mpfn | (((1UL <<(PAGE_SHIFT-ARCH_PAGE_SHIFT))-1)&gpfn);
     return mpfn;
     
 }
@@ -141,66 +128,67 @@
 #endif
 }
 
-static thash_cb_t *init_domain_vhpt(struct vcpu *d)
-{
-    struct page_info *page;
-    void   *vbase,*vcur;
-    vhpt_special *vs;
+static thash_cb_t *init_domain_vhpt(struct vcpu *d, void *vbase, void *vcur)
+{
+//    struct page_info *page;
     thash_cb_t  *vhpt;
     PTA pta_value;
-    
-    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
+/*
+    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
     if ( page == NULL ) {
         panic("No enough contiguous memory for init_domain_mm\n");
     }
     vbase = page_to_virt(page);
     printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase);
-    memset(vbase, 0, VCPU_TLB_SIZE);
-    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
+    memset(vbase, 0, VCPU_VHPT_SIZE);
+ */
+//    vcur = (void*)((u64)vbase + VCPU_VHPT_SIZE);
     vcur -= sizeof (thash_cb_t);
     vhpt = vcur;
     vhpt->ht = THASH_VHPT;
     vhpt->vcpu = d;
-    vhpt->hash_func = machine_thash;
-    vcur -= sizeof (vhpt_special);
-    vs = vcur;
+//    vhpt->hash_func = machine_thash;
+//    vcur -= sizeof (vhpt_special);
+//    vs = vcur;
 
     /* Setup guest pta */
     pta_value.val = 0;
     pta_value.ve = 1;
     pta_value.vf = 1;
-    pta_value.size = VCPU_TLB_SHIFT - 1;    /* 2M */
+    pta_value.size = VCPU_VHPT_SHIFT - 1;    /* 16M*/
     pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT;
     d->arch.arch_vmx.mpta = pta_value.val;
-   
-    vhpt->vs = vs;
-    vhpt->vs->get_mfn = get_mfn;
-    vhpt->vs->tag_func = machine_ttag;
+
+//    vhpt->vs = vs;
+//    vhpt->vs->get_mfn = __gpfn_to_mfn_foreign;
+//    vhpt->vs->tag_func = machine_ttag;
     vhpt->hash = vbase;
-    vhpt->hash_sz = VCPU_TLB_SIZE/2;
-    vhpt->cch_buf = (u64)vbase + vhpt->hash_sz;
+    vhpt->hash_sz = VCPU_VHPT_SIZE/2;
+    vhpt->cch_buf = (void *)(vbase + vhpt->hash_sz);
     vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
-    vhpt->recycle_notifier = recycle_message;
-    thash_init(vhpt,VCPU_TLB_SHIFT-1);
+//    vhpt->recycle_notifier = recycle_message;
+    thash_init(vhpt,VCPU_VHPT_SHIFT-1);
     return vhpt;
 }
 
 
+
 thash_cb_t *init_domain_tlb(struct vcpu *d)
 {
     struct page_info *page;
-    void    *vbase,*vcur;
+    void    *vbase, *vhptbase, *vcur;
     tlb_special_t  *ts;
     thash_cb_t  *tlb;
     
-    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
+    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
     if ( page == NULL ) {
         panic("No enough contiguous memory for init_domain_mm\n");
     }
-    vbase = page_to_virt(page);
-    printk("Allocate domain tlb at 0x%lx\n", (u64)vbase);
-    memset(vbase, 0, VCPU_TLB_SIZE);
-    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
+    vhptbase = page_to_virt(page);
+    memset(vhptbase, 0, VCPU_VHPT_SIZE);
+    printk("Allocate domain tlb&vhpt at 0x%lx\n", (u64)vhptbase);
+    vbase =vhptbase + VCPU_VHPT_SIZE - VCPU_VTLB_SIZE;
+    vcur = (void*)((u64)vbase + VCPU_VTLB_SIZE);
     vcur -= sizeof (thash_cb_t);
     tlb = vcur;
     tlb->ht = THASH_TLB;
@@ -208,14 +196,14 @@
     vcur -= sizeof (tlb_special_t);
     ts = vcur;
     tlb->ts = ts;
-    tlb->ts->vhpt = init_domain_vhpt(d);
-    tlb->hash_func = machine_thash;
+    tlb->ts->vhpt = init_domain_vhpt(d,vhptbase,vbase);
+//    tlb->hash_func = machine_thash;
     tlb->hash = vbase;
-    tlb->hash_sz = VCPU_TLB_SIZE/2;
-    tlb->cch_buf = (u64)vbase + tlb->hash_sz;
+    tlb->hash_sz = VCPU_VTLB_SIZE/2;
+    tlb->cch_buf = (void *)(vbase + tlb->hash_sz);
     tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
-    tlb->recycle_notifier = recycle_message;
-    thash_init(tlb,VCPU_TLB_SHIFT-1);
+//    tlb->recycle_notifier = recycle_message;
+    thash_init(tlb,VCPU_VTLB_SHIFT-1);
     return tlb;
 }
 
@@ -249,13 +237,14 @@
     u64     psr;
     thash_data_t    mtlb;
     unsigned int    cl = tlb->cl;
-
+    unsigned long mtlb_ppn;
     mtlb.ifa = tlb->vadr;
     mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
     //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
     mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
-    mtlb.ppn = get_mfn(DOMID_SELF,tlb->ppn, 1);
-    if (mtlb.ppn == INVALID_MFN)
+    mtlb.ppn = get_mfn(d->domain,tlb->ppn);
+    mtlb_ppn=mtlb.ppn;
+    if (mtlb_ppn == INVALID_MFN)
     panic("Machine tlb insert with invalid mfn number.\n");
 
     psr = ia64_clear_ic();
@@ -287,44 +276,33 @@
 //    ia64_srlz_i();
 //    return;
 }
-
-u64 machine_thash(PTA pta, u64 va)
-{
-    u64     saved_pta;
-    u64     hash_addr, tag;
-    unsigned long psr;
-    struct vcpu *v = current;
-    ia64_rr vrr;
-
-    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
-    psr = ia64_clear_ic();
-    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
-    hash_addr = ia64_thash(va);
-    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
-    ia64_set_psr(psr);
-    ia64_srlz_i();
-    return hash_addr;
-}
-
-u64 machine_ttag(PTA pta, u64 va)
-{
-//    u64     saved_pta;
-//    u64     hash_addr, tag;
-//    u64     psr;
-//    struct vcpu *v = current;
-
-//    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
-//    psr = ia64_clear_ic();
-//    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
-//    tag = ia64_ttag(va);
+/*
+u64 machine_thash(u64 va)
+{
+    return ia64_thash(va);
+}
+
+u64 machine_ttag(u64 va)
+{
     return ia64_ttag(va);
-//    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
-//    ia64_set_psr(psr);
-//    ia64_srlz_i();
-//    return tag;
-}
-
-
+}
+*/
+thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag)
+{
+    u64 index,pfn,rid,pfn_bits;
+    pfn_bits = vpta.size-5-8;
+    pfn = REGION_OFFSET(va)>>_REGION_PAGE_SIZE(vrr);
+    rid = _REGION_ID(vrr);
+    index = ((rid&0xff)<<pfn_bits)|(pfn&((1UL<<pfn_bits)-1));
+    *tag = ((rid>>8)&0xffff) | ((pfn >>pfn_bits)<<16);
+    return (thash_data_t *)((vpta.base<<PTA_BASE_SHIFT)+(index<<5));
+//    return ia64_call_vsa(PAL_VPS_THASH,va,vrr,vpta,0,0,0,0);
+}
+
+//u64 vsa_ttag(u64 va, u64 vrr)
+//{
+//    return ia64_call_vsa(PAL_VPS_TTAG,va,vrr,0,0,0,0,0);
+//}
 
 int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
 {
@@ -371,11 +349,12 @@
  *  num:  number of dword (8byts) to read.
  */
 int
-fetch_code(VCPU *vcpu, u64 gip, u64 *code)
-{
-    u64     gpip;   // guest physical IP
-    u64     mpa;
+fetch_code(VCPU *vcpu, u64 gip, u64 *code1, u64 *code2)
+{
+    u64     gpip=0;   // guest physical IP
+    u64     *vpa;
     thash_data_t    *tlb;
+    thash_cb_t *hcb;
     ia64_rr vrr;
     u64     mfn;
 
@@ -384,19 +363,26 @@
     }
     else {
         vmx_vcpu_get_rr(vcpu, gip, &vrr.rrval);
-        tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu),
-                vrr.rid, gip, ISIDE_TLB );
+       hcb = vmx_vcpu_get_vtlb(vcpu);
+        tlb = vtlb_lookup_ex (hcb, vrr.rid, gip, ISIDE_TLB );
         if( tlb == NULL )
-             tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu),
+             tlb = vtlb_lookup_ex (hcb,
                 vrr.rid, gip, DSIDE_TLB );
-        if ( tlb == NULL ) panic("No entry found in ITLB and DTLB\n");
-        gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
-    }
-    mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
-    if ( mfn == INVALID_MFN ) return 0;
- 
-    mpa = (gpip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT);
-    *code = *(u64*)__va(mpa);
+        if (tlb) 
+               gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
+    }
+    if( gpip){
+        mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
+       if( mfn == INVALID_MFN )  panic("fetch_code: invalid memory\n");
+       vpa =(u64 *)__va( (gip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT));
+    }else{
+       tlb = vhpt_lookup(gip);
+       if( tlb == NULL)
+           panic("No entry found in ITLB and DTLB\n");
+       vpa =(u64 *)__va((tlb->ppn>>(PAGE_SHIFT-ARCH_PAGE_SHIFT)<<PAGE_SHIFT)|(gip&(PAGE_SIZE-1)));
+    }
+    *code1 = *vpa++;
+    *code2 = *vpa;
     return 1;
 }
 
@@ -414,19 +400,19 @@
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 1;
     data.cl=ISIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
     data.rid = vrr.rid;
     
     sections.tr = 1;
     sections.tc = 0;
 
-    ovl = thash_find_overlap(hcb, &data, sections);
+    ovl = vtr_find_overlap(hcb, &data, ISIDE_TLB);
     while (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
-        return;
-    }
-    thash_purge_and_insert(hcb, &data);
+        return IA64_FAULT;
+    }
+    thash_purge_and_insert(hcb, &data, ifa);
     return IA64_NO_FAULT;
 }
 
@@ -447,24 +433,26 @@
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 1;
     data.cl=DSIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+    vmx_vcpu_get_rr(vcpu, ifa,(UINT64 *)&vrr);
     data.rid = vrr.rid;
     sections.tr = 1;
     sections.tc = 0;
 
-    ovl = thash_find_overlap(hcb, &data, sections);
+    ovl = vtr_find_overlap(hcb, &data, DSIDE_TLB);
     if (ovl) {
           // generate MCA.
         panic("Tlb conflict!!");
-        return;
-    }
-    thash_purge_and_insert(hcb, &data);
+        return IA64_FAULT;
+    }
+    thash_purge_and_insert(hcb, &data, ifa);
     return IA64_NO_FAULT;
 }
 
 /*
  * Return TRUE/FALSE for success of lock operation
  */
+
+/*
 int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock)
 {
 
@@ -478,6 +466,9 @@
     preferred_size = PSIZE(vrr.ps);
     return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock);
 }
+ */
+
+
 
 IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
 {
@@ -486,6 +477,7 @@
     thash_cb_t  *hcb;
     search_section_t sections;
     ia64_rr vrr;
+    /* u64 mfn,psr; */
 
     hcb = vmx_vcpu_get_vtlb(vcpu);
     data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
@@ -493,21 +485,38 @@
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 0;
     data.cl=ISIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
     data.rid = vrr.rid;
     sections.tr = 1;
     sections.tc = 0;
 
-    ovl = thash_find_overlap(hcb, &data, sections);
+
+    ovl = vtr_find_overlap(hcb, &data, ISIDE_TLB);
     if (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
-        return;
+        return IA64_FAULT;
     }
     sections.tr = 0;
     sections.tc = 1;
     thash_purge_entries(hcb, &data, sections);
+/*    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
+        data.contiguous=1;
+    }
+ */
     thash_tr_insert(hcb, &data, ifa, idx);
+/*
+    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
+        mfn = __gpfn_to_mfn_foreign(vcpu->domain,arch_to_xen_ppn(data.ppn));
+        data.page_flags=pte&~PAGE_FLAGS_RV_MASK;
+        data.ppn = xen_to_arch_ppn(mfn);
+        psr = ia64_clear_ic();
+        ia64_itr(0x1, IA64_ITR_GUEST_KERNEL, data.vadr, data.page_flags, data.ps);
+        ia64_set_psr(psr);      // restore psr
+        ia64_srlz_i();
+//        return IA64_NO_FAULT;
+    }
+*/
     return IA64_NO_FAULT;
 }
 
@@ -518,7 +527,7 @@
     thash_cb_t  *hcb;
     search_section_t sections;
     ia64_rr    vrr;
-
+    /* u64 mfn,psr; */
 
     hcb = vmx_vcpu_get_vtlb(vcpu);
     data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
@@ -526,21 +535,39 @@
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 0;
     data.cl=DSIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+    vmx_vcpu_get_rr(vcpu, ifa,(UINT64 *)&vrr);
     data.rid = vrr.rid;
     sections.tr = 1;
     sections.tc = 0;
 
-    ovl = thash_find_overlap(hcb, &data, sections);
+    ovl = vtr_find_overlap(hcb, &data, DSIDE_TLB);
     while (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
-        return;
+        return IA64_FAULT;
     }
     sections.tr = 0;
     sections.tc = 1;
     thash_purge_entries(hcb, &data, sections);
+/*
+    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
+        data.contiguous=1;
+    }
+ */
     thash_tr_insert(hcb, &data, ifa, idx);
+/*
+    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
+        mfn = __gpfn_to_mfn_foreign(vcpu->domain,arch_to_xen_ppn(data.ppn));
+        data.page_flags=pte&~PAGE_FLAGS_RV_MASK;
+        data.ppn = xen_to_arch_ppn(mfn);
+        psr = ia64_clear_ic();
+        ia64_itr(0x2,IA64_DTR_GUEST_KERNEL , data.vadr, data.page_flags, data.ps);
+        ia64_set_psr(psr);      // restore psr
+        ia64_srlz_i();
+//        return IA64_NO_FAULT;
+    }
+*/
+
     return IA64_NO_FAULT;
 }
 
@@ -578,7 +605,6 @@
     thash_cb_t  *hcb;
     ia64_rr vrr;
     search_section_t sections;
-    thash_data_t data, *ovl;
     hcb = vmx_vcpu_get_vtlb(vcpu);
     vrr=vmx_vcpu_rr(vcpu,vadr);
     sections.tr = 0;
@@ -616,7 +642,7 @@
 {
     PTA vpta;
     ia64_rr vrr;
-    u64 vhpt_offset,tmp;
+    u64 vhpt_offset;
     vmx_vcpu_get_pta(vcpu, &vpta.val);
     vrr=vmx_vcpu_rr(vcpu, vadr);
     if(vpta.vf){
@@ -686,7 +712,25 @@
             *padr = (data->ppn<<12) | (vadr&(PSIZE(data->ps)-1));
             return IA64_NO_FAULT;
         }
-    }else{
+    }
+    data = vhpt_lookup(vadr);
+    if(data){
+        if(data->p==0){
+            visr.na=1;
+            vcpu_set_isr(vcpu,visr.val);
+            page_not_present(vcpu, vadr);
+            return IA64_FAULT;
+        }else if(data->ma == VA_MATTR_NATPAGE){
+            visr.na = 1;
+            vcpu_set_isr(vcpu, visr.val);
+            dnat_page_consumption(vcpu, vadr);
+            return IA64_FAULT;
+        }else{
+            *padr = ((*(mpt_table+arch_to_xen_ppn(data->ppn)))<<PAGE_SHIFT) | (vadr&(PAGE_SIZE-1));
+            return IA64_NO_FAULT;
+        }
+    }
+    else{
         if(!vhpt_enabled(vcpu, vadr, NA_REF)){
             if(vpsr.ic){
                 vcpu_set_isr(vcpu, visr.val);
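
The vhpt_lookup() fallback added to vmx_vcpu_tpa() above converts a machine
VHPT entry back to a guest-physical address through mpt_table. A hedged helper
capturing just that conversion (the helper name is mine; the arithmetic
mirrors the *padr computation in the hunk):

    static inline u64 sketch_vhpt_entry_to_padr(thash_data_t *data, u64 vadr)
    {
        /* machine pfn -> guest pfn via the machine-to-physical table */
        u64 gpfn = *(mpt_table + arch_to_xen_ppn(data->ppn));
        return (gpfn << PAGE_SHIFT) | (vadr & (PAGE_SIZE - 1));
    }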
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S     Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vmx_entry.S     Thu Mar  2 10:00:49 2006
@@ -34,6 +34,7 @@
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 #include <asm/vhpt.h>
+#include <asm/vmmu.h>
 #include "vmx_minstate.h"
 
 /*
@@ -696,7 +697,7 @@
    movl r25=PAGE_KERNEL
    ;;
    or loc5 = r25,loc5          // construct PA | page properties
-   mov r23 = IA64_GRANULE_SHIFT <<2
+   mov r23 = VCPU_VHPT_SHIFT <<2
    ;;
    ptr.d   in3,r23
    ;;
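
The one-line vmx_entry.S change works because ptr.d carries the purge
page-size in bits 7:2 of its second operand, which is why the constant is
shifted left by two; the hunk switches the purge from a granule-sized region
to the per-VCPU VHPT size. A hedged C equivalent, assuming the standard
ia64_ptrd()/ia64_srlz_d() intrinsics:

    /* Sketch: purge the DTR covering the per-VCPU VHPT. */
    static inline void sketch_purge_vhpt_dtr(unsigned long vhpt_va)
    {
        ia64_ptrd(vhpt_va, VCPU_VHPT_SHIFT << 2);   /* size in bits 7:2 */
        ia64_srlz_d();                              /* serialize the purge */
    }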
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Thu Mar  2 10:00:49 2006
@@ -31,6 +31,11 @@
 #include <xen/mm.h>
 #include <xen/multicall.h>
 #include <xen/hypercall.h>
+#include <public/version.h>
+#include <asm/dom_fw.h>
+#include <xen/domain.h>
+
+extern long do_sched_op(int cmd, unsigned long arg);
 
 
 void hyper_not_support(void)
@@ -48,7 +53,7 @@
     vcpu_get_gr_nat(vcpu,17,&r33);
     vcpu_get_gr_nat(vcpu,18,&r34);
     vcpu_get_gr_nat(vcpu,19,&r35);
-    ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
+    ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,(u64 *)r34,r35);
     vcpu_set_gr(vcpu, 8, ret, 0);
     vmx_vcpu_increment_iip(vcpu);
 }
@@ -124,7 +129,6 @@
 
 static int do_lock_page(VCPU *vcpu, u64 va, u64 lock)
 {
-    int i;
     ia64_rr rr;
     thash_cb_t *hcb;
     hcb = vmx_vcpu_get_vtlb(vcpu);
@@ -136,6 +140,8 @@
  * Lock guest page in vTLB, so that it's not relinquished by recycle
  * session when HV is servicing that hypercall.
  */
+
+/*
 void hyper_lock_page(void)
 {
 //TODO:
@@ -148,6 +154,7 @@
 
     vmx_vcpu_increment_iip(vcpu);
 }
+ */
 
 static int do_set_shared_page(VCPU *vcpu, u64 gpa)
 {
@@ -169,7 +176,7 @@
         * to xen heap. Or else, leave to domain itself to decide.
         */
        if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
-               free_xenheap_page(o_info);
+               free_xenheap_page((void *)o_info);
     } else
         memset(d->shared_info, 0, PAGE_SIZE);
     return 0;
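
Only a fragment of do_set_shared_page() is visible above. Its rough shape,
under the assumption (consistent with the fragment) that o_info holds the
previous shared_info mapping, is the following; this is a reconstruction, not
the committed code:

    static int sketch_set_shared_page(struct domain *d, u64 gpa)
    {
        u64 o_info = (u64)d->shared_info;

        d->shared_info = (shared_info_t *)__va(__gpa_to_mpa(d, gpa));
        if (o_info) {
            /* free the old page only if it really came from the xenheap */
            if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
                free_xenheap_page((void *)o_info);
        } else
            memset(d->shared_info, 0, PAGE_SIZE);
        return 0;
    }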
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vmx_init.c      Thu Mar  2 10:00:49 2006
@@ -96,7 +96,7 @@
        if (!(vp_env_info & VP_OPCODE))
                printk("WARNING: no opcode provided from hardware(%lx)!!!\n", 
vp_env_info);
        vm_order = get_order(buffer_size);
-       printk("vm buffer size: %d, order: %d\n", buffer_size, vm_order);
+       printk("vm buffer size: %ld, order: %ld\n", buffer_size, vm_order);
 
        vmx_enabled = 1;
 no_vti:
@@ -114,7 +114,7 @@
        u64 status, tmp_base;
 
        if (!vm_buffer) {
-               vm_buffer = alloc_xenheap_pages(vm_order);
+               vm_buffer = (unsigned long)alloc_xenheap_pages(vm_order);
                ASSERT(vm_buffer);
                printk("vm_buffer: 0x%lx\n", vm_buffer);
        }
@@ -126,7 +126,7 @@
 
        if (status != PAL_STATUS_SUCCESS) {
                printk("ia64_pal_vp_init_env failed.\n");
-               return -1;
+               return ;
        }
 
        if (!__vsa_base)
@@ -172,7 +172,15 @@
        cpuid3.number = 4;      /* 5 - 1 */
        vpd->vcpuid[3] = cpuid3.value;
 
+    vpd->vac.a_from_int_cr = 1;
+    vpd->vac.a_to_int_cr = 1;
+    vpd->vac.a_from_psr = 1;
+    vpd->vac.a_from_cpuid = 1;
+    vpd->vac.a_cover = 1;
+    vpd->vac.a_bsw = 1;
+
        vpd->vdc.d_vmsw = 1;
+
        return vpd;
 }
 
@@ -190,7 +198,7 @@
        /* ia64_ivt is function pointer, so need this tranlation */
        ivt_base = (u64) &vmx_ia64_ivt;
        printk("ivt_base: 0x%lx\n", ivt_base);
-       ret = ia64_pal_vp_create(vpd, ivt_base, 0);
+       ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
        if (ret != PAL_STATUS_SUCCESS)
                panic("ia64_pal_vp_create failed. \n");
 }
@@ -199,11 +207,10 @@
 void
 vmx_save_state(struct vcpu *v)
 {
-       u64 status, psr;
-       u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
+       u64 status;
 
        /* FIXME: about setting of pal_proc_vector... time consuming */
-       status = ia64_pal_vp_save(v->arch.privregs, 0);
+       status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
        if (status != PAL_STATUS_SUCCESS)
                panic("Save vp status failed\n");
 
@@ -225,10 +232,7 @@
 void
 vmx_load_state(struct vcpu *v)
 {
-       u64 status, psr;
-       u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
-       u64 pte_xen, pte_vhpt;
-       int i;
+       u64 status;
 
        status = ia64_pal_vp_restore(v->arch.privregs, 0);
        if (status != PAL_STATUS_SUCCESS)
@@ -304,7 +308,7 @@
 int vmx_alloc_contig_pages(struct domain *d)
 {
        unsigned int order;
-       unsigned long i, j, start, end, pgnr, conf_nr;
+       unsigned long i, j, start,tmp, end, pgnr, conf_nr;
        struct page_info *page;
        struct vcpu *v = d->vcpu[0];
 
@@ -315,57 +319,105 @@
            for (j = io_ranges[i].start;
                 j < io_ranges[i].start + io_ranges[i].size;
                 j += PAGE_SIZE)
-               map_domain_page(d, j, io_ranges[i].type);
+               assign_domain_page(d, j, io_ranges[i].type);
        }
 
        conf_nr = VMX_CONFIG_PAGES(d);
+    if((conf_nr<<PAGE_SHIFT)<(1UL<<(_PAGE_SIZE_64M+1)))
+        panic("vti domain needs 128M memory at least\n");
+/*
        order = get_order_from_pages(conf_nr);
        if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
            printk("Could not allocate order=%d pages for vmx contig alloc\n",
                        order);
            return -1;
        }
+*/
+ 
+/* reserve contiguous 64M for linux kernel */
+
+    if (unlikely((page = alloc_domheap_pages(d,(KERNEL_TR_PAGE_SHIFT-PAGE_SHIFT), 0)) == NULL)) {
+        printk("No enough memory for vti domain!!!\n");
+        return -1;
+    }
+    pgnr = page_to_mfn(page);
+       for (i=(1UL<<KERNEL_TR_PAGE_SHIFT);i<(1UL<<(KERNEL_TR_PAGE_SHIFT+1));i+=PAGE_SIZE,pgnr++){
+           assign_domain_page(d, i, pgnr << PAGE_SHIFT);
+    }
+
+       for (i = 0; i < (1UL<<KERNEL_TR_PAGE_SHIFT) ; i += PAGE_SIZE){
+        if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
+            printk("No enough memory for vti domain!!!\n");
+            return -1;
+        }
+           pgnr = page_to_mfn(page);
+           assign_domain_page(d, i, pgnr << PAGE_SHIFT);
+    }
 
        /* Map normal memory below 3G */
-       pgnr = page_to_mfn(page);
        end = conf_nr << PAGE_SHIFT;
-       for (i = 0;
-            i < (end < MMIO_START ? end : MMIO_START);
-            i += PAGE_SIZE, pgnr++)
-           map_domain_page(d, i, pgnr << PAGE_SHIFT);
-
+    tmp = end < MMIO_START ? end : MMIO_START;
+       for (i = (1UL<<(KERNEL_TR_PAGE_SHIFT+1)); i < tmp; i += PAGE_SIZE){
+        if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
+            printk("No enough memory for vti domain!!!\n");
+            return -1;
+        }
+           pgnr = page_to_mfn(page);
+           assign_domain_page(d, i, pgnr << PAGE_SHIFT);
+    }
        /* Map normal memory beyond 4G */
        if (unlikely(end > MMIO_START)) {
            start = 4 * MEM_G;
            end = start + (end - 3 * MEM_G);
-           for (i = start; i < end; i += PAGE_SIZE, pgnr++)
-               map_domain_page(d, i, pgnr << PAGE_SHIFT);
+           for (i = start; i < end; i += PAGE_SIZE){
+            if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
+                printk("No enough memory for vti domain!!!\n");
+                return -1;
+            }
+            pgnr = page_to_mfn(page);
+            assign_domain_page(d, i, pgnr << PAGE_SHIFT);
+        }
        }
 
        d->arch.max_pfn = end >> PAGE_SHIFT;
-
+/*
        order = get_order_from_pages(GFW_SIZE >> PAGE_SHIFT);
        if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
            printk("Could not allocate order=%d pages for vmx contig alloc\n",
-                       order);
+                       order);
            return -1;
        }
-
+*/
        /* Map guest firmware */
-       pgnr = page_to_mfn(page);
-       for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
-           map_domain_page(d, i, pgnr << PAGE_SHIFT);
-
+       for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++){
+        if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
+            printk("No enough memory for vti domain!!!\n");
+            return -1;
+        }
+           pgnr = page_to_mfn(page);
+           assign_domain_page(d, i, pgnr << PAGE_SHIFT);
+    }
+
+/*
        if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
            printk("Could not allocate order=1 pages for vmx contig alloc\n");
            return -1;
        }
-
+*/
        /* Map for shared I/O page and xenstore */
+    if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
+        printk("No enough memory for vti domain!!!\n");
+        return -1;
+    }
        pgnr = page_to_mfn(page);
-       map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
-       pgnr++;
-       map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
+       assign_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
+
+    if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
+        printk("No enough memory for vti domain!!!\n");
+        return -1;
+    }
+       pgnr = page_to_mfn(page);
+       assign_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
 
        set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
        return 0;
@@ -375,7 +427,7 @@
 {
        ASSERT(d != dom0); /* only for non-privileged vti domain */
        d->arch.vmx_platform.shared_page_va =
-               __va(__gpa_to_mpa(d, IO_PAGE_START));
+               (unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
        /* TEMP */
        d->arch.vmx_platform.pib_base = 0xfee00000UL;
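
The rewritten vmx_alloc_contig_pages() above drops the single high-order
allocation in favour of one step repeated for every region except the
contiguous 64M kernel-TR chunk: allocate an order-0 domheap page and wire it
at a guest-physical address. A hedged helper capturing that step (the helper
name is mine):

    static int sketch_alloc_and_assign(struct domain *d, unsigned long gpaddr)
    {
        struct page_info *page = alloc_domheap_pages(d, 0, 0); /* order 0 */
        if (unlikely(page == NULL))
            return -1;                /* the caller printks and bails out */
        assign_domain_page(d, gpaddr, page_to_mfn(page) << PAGE_SHIFT);
        return 0;
    }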
 
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vmx_interrupt.c
--- a/xen/arch/ia64/vmx/vmx_interrupt.c Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c Thu Mar  2 10:00:49 2006
@@ -86,7 +86,7 @@
 
 }
 
-int
+void
 inject_guest_interruption(VCPU *vcpu, u64 vec)
 {
     u64 viva;
@@ -334,6 +334,7 @@
  *  @ Nat Consumption Vector
  * Refer to SDM Vol2 Table 5-6 & 8-1
  */
+
 static void
 ir_nat_page_consumption (VCPU *vcpu, u64 vadr)
 {
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vmx_irq_ia64.c
--- a/xen/arch/ia64/vmx/vmx_irq_ia64.c  Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c  Thu Mar  2 10:00:49 2006
@@ -24,6 +24,8 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
 
+#include <asm/vcpu.h>
+#include <xen/irq.h>
 #ifdef CONFIG_SMP
 #   define IS_RESCHEDULE(vec)   (vec == IA64_IPI_RESCHEDULE)
 #else
@@ -126,6 +128,6 @@
         * come through until ia64_eoi() has been done.
         */
        vmx_irq_exit();
-       if ( wake_dom0 && current != dom0 ) 
+       if ( wake_dom0 && current->domain != dom0 )
                vcpu_wake(dom0->vcpu[0]);
 }
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S       Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vmx_ivt.S       Thu Mar  2 10:00:49 2006
@@ -269,6 +269,10 @@
 (p7)br.sptk vmx_fault_3
 vmx_alt_itlb_miss_1:
        mov r16=cr.ifa          // get address that caused the TLB miss
+    ;;
+    tbit.z p6,p7=r16,63
+(p6)br.sptk vmx_fault_3
+    ;;
        movl r17=PAGE_KERNEL
        mov r24=cr.ipsr
        movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
@@ -300,6 +304,10 @@
 (p7)br.sptk vmx_fault_4
 vmx_alt_dtlb_miss_1:
        mov r16=cr.ifa          // get address that caused the TLB miss
+    ;;
+    tbit.z p6,p7=r16,63
+(p6)br.sptk vmx_fault_4
+    ;;
        movl r17=PAGE_KERNEL
        mov r20=cr.isr
        movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
@@ -397,7 +405,7 @@
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
 ENTRY(vmx_interrupt)
-    VMX_DBG_FAULT(12)
+//    VMX_DBG_FAULT(12)
        mov r31=pr              // prepare to save predicates
     mov r19=12
     mov r29=cr.ipsr
@@ -734,7 +742,7 @@
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
 ENTRY(vmx_virtualization_fault)
-    VMX_DBG_FAULT(37)
+//    VMX_DBG_FAULT(37)
        mov r31=pr
     mov r19=37
     adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
@@ -1138,5 +1146,5 @@
     data8 hyper_not_support     //hyper_boot_vcpu
     data8 hyper_not_support     //hyper_ni_hypercall       /* 25 */
     data8 hyper_not_support     //hyper_mmuext_op
-    data8 hyper_lock_page
+    data8 hyper_not_support     //data8 hyper_lock_page
     data8 hyper_set_shared_page
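
The tbit.z guards added to both alternate miss handlers only let addresses
with bit 63 set - the kernel regions the fast identity mapping is meant for -
take the fast path; everything else is now reflected through
vmx_fault_3/vmx_fault_4. As a C sketch of the predicate computed by
"tbit.z p6,p7=r16,63":

    static inline int sketch_alt_miss_fast_path(u64 ifa)
    {
        return (ifa >> 63) & 1;   /* 0 => branch to the fault handler */
    }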
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Thu Mar  2 10:00:49 2006
@@ -27,7 +27,7 @@
 #include <asm/vmx_phy_mode.h>
 #include <xen/sched.h>
 #include <asm/pgtable.h>
-
+#include <asm/vmmu.h>
 int valid_mm_mode[8] = {
     GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
     INV_MODE,
@@ -61,9 +61,9 @@
      *  data access can be satisfied though itlb entry for physical
      *  emulation is hit.
          */
-    SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V,
-    0,  0,  0,  0,  0,  0,  0,  0,
-    0,  0,  0,  0,  0,  0,  0,  0,
+    {SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
+    {0,  0,  0,  0,  0,  0,  0,  0},
+    {0,  0,  0,  0,  0,  0,  0,  0},
     /*
      *  (it,dt,rt): (0,1,1) -> (1,1,1)
      *  This kind of transition is found in OSYa.
@@ -71,17 +71,17 @@
      *  (it,dt,rt): (0,1,1) -> (0,0,0)
      *  This kind of transition is found in OSYa
      */
-    SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V,
+    {SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V},
     /* (1,0,0)->(1,1,1) */
-    0,  0,  0,  0,  0,  0,  0,  SW_P2V,
+    {0,  0,  0,  0,  0,  0,  0,  SW_P2V},
     /*
          *  (it,dt,rt): (1,0,1) -> (1,1,1)
          *  This kind of transition usually occurs when Linux returns
      *  from the low level TLB miss handlers.
          *  (see "arch/ia64/kernel/ivt.S")
          */
-    0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V,
-    0,  0,  0,  0,  0,  0,  0,  0,
+    {0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V},
+    {0,  0,  0,  0,  0,  0,  0,  0},
     /*
          *  (it,dt,rt): (1,1,1) -> (1,0,1)
          *  This kind of transition usually occurs in Linux low level
@@ -94,68 +94,18 @@
      *  (1,1,1)->(1,0,0)
      */
 
-    SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF,
+    {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
 };
 
 void
 physical_mode_init(VCPU *vcpu)
 {
-    UINT64 psr;
-    struct domain * d = vcpu->domain;
-
     vcpu->arch.old_rsc = 0;
     vcpu->arch.mode_flags = GUEST_IN_PHY;
 }
 
-extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
-#if 0
-void
-physical_itlb_miss_domn(VCPU *vcpu, u64 vadr)
-{
-    u64 psr;
-    IA64_PSR vpsr;
-    u64 mppn,gppn,mpp1,gpp1;
-    struct domain *d;
-    static u64 test=0;
-    d=vcpu->domain;
-    if(test)
-        panic("domn physical itlb miss happen\n");
-    else
-        test=1;
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    gppn=(vadr<<1)>>13;
-    mppn = get_mfn(DOMID_SELF,gppn,1);
-    mppn=(mppn<<12)|(vpsr.cpl<<7);
-    gpp1=0;
-    mpp1 = get_mfn(DOMID_SELF,gpp1,1);
-    mpp1=(mpp1<<12)|(vpsr.cpl<<7);
-//    if(vadr>>63)
-//        mppn |= PHY_PAGE_UC;
-//    else
-//        mppn |= PHY_PAGE_WB;
-    mpp1 |= PHY_PAGE_WB;
-    psr=ia64_clear_ic();
-    ia64_itr(0x1, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
-    ia64_srlz_i();
-    ia64_itr(0x2, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
-    ia64_stop();
-    ia64_srlz_i();
-    ia64_itr(0x1, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), (mppn|PHY_PAGE_WB), 24);
-    ia64_srlz_i();
-    ia64_itr(0x2, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), (mppn|PHY_PAGE_WB), 24);
-    ia64_stop();
-    ia64_srlz_i();
-    ia64_itr(0x1, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
-    ia64_srlz_i();
-    ia64_itr(0x2, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
-    ia64_stop();
-    ia64_srlz_i();
-    ia64_set_psr(psr);
-    ia64_srlz_i();
-    return;
-}
-#endif
-
+extern u64 get_mfn(struct domain *d, u64 gpfn);
+extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
 void
 physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr)
 {
@@ -164,7 +114,7 @@
     u64 mppn,gppn;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     gppn=(vadr<<1)>>13;
-    mppn = get_mfn(DOMID_SELF,gppn,1);
+    mppn = get_mfn(vcpu->domain,gppn);
     mppn=(mppn<<12)|(vpsr.cpl<<7); 
 //    if(vadr>>63)
 //       mppn |= PHY_PAGE_UC;
@@ -196,7 +146,7 @@
 //        panic("dom n physical dtlb miss happen\n");
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     gppn=(vadr<<1)>>13;
-    mppn = get_mfn(DOMID_SELF,gppn,1);
+    mppn = get_mfn(vcpu->domain, gppn);
     mppn=(mppn<<12)|(vpsr.cpl<<7);
     if(vadr>>63)
         mppn |= PHY_PAGE_UC;
@@ -404,7 +354,7 @@
         switch_mm_mode (vcpu, old_psr, new_psr);
     }
 
-    return 0;
+    return;
 }
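
The rows braced above belong to an 8x8 switch table indexed by the old and new
(it,dt,rt) triples, each packed into three bits. A hedged sketch of the
lookup; the table name and the bit packing follow what I take MODE_IND() in
vmx_phy_mode.h to be, so both are assumptions here:

    extern int mm_switch_table[8][8];    /* assumed name of the table above */

    static inline int sketch_mm_switch_action(IA64_PSR old_psr, IA64_PSR new_psr)
    {
        int from = (old_psr.it << 2) | (old_psr.dt << 1) | old_psr.rt;
        int to   = (new_psr.it << 2) | (new_psr.dt << 1) | new_psr.rt;
        return mm_switch_table[from][to];   /* SW_NOP, SW_SELF, SW_P2V, ... */
    }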
 
 
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vmx_process.c   Thu Mar  2 10:00:49 2006
@@ -47,9 +47,11 @@
 #include <asm/vmx_vcpu.h>
 #include <asm/kregs.h>
 #include <asm/vmx.h>
+#include <asm/vmmu.h>
 #include <asm/vmx_mm_def.h>
 #include <asm/vmx_phy_mode.h>
 #include <xen/mm.h>
+#include <asm/vmx_pal.h>
 /* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
 #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
 
@@ -65,7 +67,7 @@
     0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
     0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
     0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
-    0x7f00,
+    0x7f00
 };
 
 
@@ -74,7 +76,7 @@
      UINT64 vector,REGS *regs)
 {
     VCPU *vcpu = current;
-    UINT64 viha,vpsr = vmx_vcpu_get_psr(vcpu);
+    UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
     if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
         panic("Guest nested fault!");
     }
@@ -92,10 +94,8 @@
 IA64FAULT
 vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
 {
-       static int first_time = 1;
        struct domain *d = (struct domain *) current->domain;
-       struct vcpu *v = (struct domain *) current;
-       extern unsigned long running_on_sim;
+       struct vcpu *v = (struct vcpu *) current;
        unsigned long i, sal_param[8];
 
 #if 0
@@ -160,12 +160,12 @@
                    case FW_HYPERCALL_EFI_GET_TIME:
                        {
                        unsigned long *tv, *tc;
-                       vcpu_get_gr_nat(v, 32, &tv);
-                       vcpu_get_gr_nat(v, 33, &tc);
+                       vcpu_get_gr_nat(v, 32, (u64 *)&tv);
+                       vcpu_get_gr_nat(v, 33, (u64 *)&tc);
                        printf("efi_get_time(%p,%p) called...",tv,tc);
-                       tv = __va(translate_domain_mpaddr(tv));
-                       if (tc) tc = __va(translate_domain_mpaddr(tc));
-                       regs->r8 = (*efi.get_time)(tv,tc);
+                       tv = __va(translate_domain_mpaddr((unsigned long)tv));
+                       if (tc) tc = __va(translate_domain_mpaddr((unsigned long)tc));
+                       regs->r8 = (*efi.get_time)((efi_time_t *)tv,(efi_time_cap_t *)tc);
                        printf("and returns %lx\n",regs->r8);
                        }
                        break;
@@ -200,12 +200,13 @@
                        die_if_kernel("bug check", regs, iim);
                vmx_reflect_interruption(ifa,isr,iim,11,regs);
     }
+    return IA64_NO_FAULT;
 }
 
 
 void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
 {
-    unsigned long i, * src,* dst, *sunat, *dunat;
+    unsigned long i=0UL, * src,* dst, *sunat, *dunat;
     IA64_PSR vpsr;
     src=&regs->r16;
     sunat=&regs->eml_unat;
@@ -262,10 +263,10 @@
                 *
                 * Now hardcode the vector as 0x10 temporarily
                 */
-               if (event_pending(v)&&(!(VLSAPIC_INSVC(v,0)&(1UL<<0x10)))) {
-                       VCPU(v, irr[0]) |= 1UL << 0x10;
-                       v->arch.irq_new_pending = 1;
-               }
+//             if (event_pending(v)&&(!(VLSAPIC_INSVC(v,0)&(1UL<<0x10)))) {
+//                     VCPU(v, irr[0]) |= 1UL << 0x10;
+//                     v->arch.irq_new_pending = 1;
+//             }
 
                if ( v->arch.irq_new_pending ) {
                        v->arch.irq_new_pending = 0;
@@ -287,16 +288,17 @@
 }
 
 /* We came here because the H/W VHPT walker failed to find an entry */
-void vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
+IA64FAULT
+vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
 {
     IA64_PSR vpsr;
-    CACHE_LINE_TYPE type;
+    CACHE_LINE_TYPE type=ISIDE_TLB;
     u64 vhpt_adr, gppa;
     ISR misr;
     ia64_rr vrr;
 //    REGS *regs;
-    thash_cb_t *vtlb, *vhpt;
-    thash_data_t *data, me;
+    thash_cb_t *vtlb;
+    thash_data_t *data;
     VCPU *v = current;
     vtlb=vmx_vcpu_get_vtlb(v);
 #ifdef  VTLB_DEBUG
@@ -313,10 +315,14 @@
         return;
     }
 */
+    if(vadr == 0x1ea18c00 ){
+        ia64_clear_ic();
+        while(1);
+    }
     if(is_physical_mode(v)&&(!(vadr<<1>>62))){
         if(vec==1){
             physical_itlb_miss(v, vadr);
-            return;
+            return IA64_FAULT;
         }
         if(vec==2){
             if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
@@ -324,7 +330,7 @@
             }else{
                 physical_dtlb_miss(v, vadr);
             }
-            return;
+            return IA64_FAULT;
         }
     }
     vrr = vmx_vcpu_rr(v, vadr);
@@ -334,19 +340,25 @@
 
 //    prepare_if_physical_mode(v);
 
-    if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
+    if((data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type))!=0){
        gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
         if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
             emulate_io_inst(v, gppa, data->ma);
             return IA64_FAULT;
         }
 
-       if ( data->ps != vrr.ps ) {
+//     if ( data->ps != vrr.ps ) {
+//             machine_tlb_insert(v, data);
+//     }
+//     else {
+/*        if ( data->contiguous&&(!data->tc)){
                machine_tlb_insert(v, data);
-       }
-       else {
-               thash_insert(vtlb->ts->vhpt,data,vadr);
-           }
+        }
+        else{
+ */
+            thash_vhpt_insert(vtlb->ts->vhpt,data,vadr);
+//        }
+//         }
     }else if(type == DSIDE_TLB){
         if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
             if(vpsr.ic){
@@ -366,8 +378,7 @@
         } else{
             vmx_vcpu_thash(v, vadr, &vhpt_adr);
             vrr=vmx_vcpu_rr(v,vhpt_adr);
-            data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
-            if(data){
+            if(vhpt_lookup(vhpt_adr) ||  vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)){
                 if(vpsr.ic){
                     vcpu_set_isr(v, misr.val);
                     dtlb_fault(v, vadr);
@@ -410,8 +421,7 @@
         } else{
             vmx_vcpu_thash(v, vadr, &vhpt_adr);
             vrr=vmx_vcpu_rr(v,vhpt_adr);
-            data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
-            if(data){
+            if(vhpt_lookup(vhpt_adr) || vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)){
                 if(!vpsr.ic){
                     misr.ni=1;
                 }
@@ -428,6 +438,5 @@
             }
         }
     }
-}
-
-
+    return IA64_NO_FAULT;
+}
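
vmx_hpw_miss() now returns an IA64FAULT and refills the machine VHPT through
thash_vhpt_insert() instead of thash_insert(). Condensed from the hunks above,
the data-side hit path is roughly the following sketch (not the committed
function; the helper name is mine):

    static IA64FAULT sketch_dside_refill(VCPU *v, thash_cb_t *vtlb, u64 vadr)
    {
        ia64_rr vrr = vmx_vcpu_rr(v, vadr);
        thash_data_t *data = vtlb_lookup_ex(vtlb, vrr.rid, vadr, DSIDE_TLB);
        u64 gppa;

        if (data == NULL)    /* miss: reflect a fault or walk the guest VHPT */
            return IA64_FAULT;
        gppa = (vadr & ((1UL << data->ps) - 1)) +
               (data->ppn >> (data->ps - 12) << data->ps);
        if (v->domain != dom0 && __gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
            emulate_io_inst(v, gppa, data->ma);  /* MMIO: emulate, don't map */
            return IA64_FAULT;
        }
        thash_vhpt_insert(vtlb->ts->vhpt, data, vadr); /* refill machine VHPT */
        return IA64_NO_FAULT;
    }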
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vmx_utility.c
--- a/xen/arch/ia64/vmx/vmx_utility.c   Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vmx_utility.c   Thu Mar  2 10:00:49 2006
@@ -307,9 +307,8 @@
             }
             return 0;
     }
-
-
     panic ("Unsupported CR");
+    return 0;
 }
 
 
@@ -600,7 +599,6 @@
 
 void set_isr_for_priv_fault(VCPU *vcpu, u64 non_access)
 {
-    u64 value;
     ISR isr;
 
     isr.val = set_isr_ei_ni(vcpu);
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c      Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c      Thu Mar  2 10:00:49 2006
@@ -35,7 +35,7 @@
 #include <asm/gcc_intrin.h>
 #include <asm/vmx_mm_def.h>
 #include <asm/vmx.h>
-
+#include <asm/vmx_phy_mode.h>
 //u64  fire_itc;
 //u64  fire_itc2;
 //u64  fire_itm;
@@ -66,7 +66,6 @@
 #include <asm/hw_irq.h>
 #include <asm/vmx_pal_vsa.h>
 #include <asm/kregs.h>
-
 //unsigned long last_guest_rsm = 0x0;
 struct guest_psr_bundle{
     unsigned long ip;
@@ -138,7 +137,7 @@
     regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );
 
     check_mm_mode_switch(vcpu, old_psr, new_psr);
-    return IA64_NO_FAULT;
+    return ;
 }
 
 /* Adjust slot both in pt_regs and vpd, upon vpsr.ri which
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c      Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vmx_virt.c      Thu Mar  2 10:00:49 2006
@@ -30,8 +30,9 @@
 #include <asm/vmmu.h>
 #include <asm/vmx_mm_def.h>
 #include <asm/smp.h>
-
+#include <asm/vmx.h>
 #include <asm/virt_event.h>
+#include <asm/vmx_phy_mode.h>
 extern UINT64 privop_trace;
 
 void
@@ -137,6 +138,11 @@
                 *cause=EVENT_BSW_1;
             }
         }
+        case I:
+        case F:
+        case L:
+        case ILLEGAL:
+        break;
     }
 }
 
@@ -157,7 +163,6 @@
 {
     UINT64 tgt = inst.M33.r1;
     UINT64 val;
-    IA64FAULT fault;
 
 /*
     if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
@@ -176,7 +181,6 @@
 IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
 {
     UINT64 val;
-    IA64FAULT fault;
     if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
        panic(" get_psr nat bit fault\n");
 
@@ -255,7 +259,6 @@
 IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
 {
     u64 r2,r3;
-    ISR isr;
     IA64_PSR  vpsr;
 
     vpsr.val=vmx_vcpu_get_psr(vcpu);
@@ -267,6 +270,7 @@
     }
     if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
 #ifdef  VMAL_NO_FAULT_CHECK
+        ISR isr;
         set_isr_reg_nat_consumption(vcpu,0,0);
         rnat_comsumption(vcpu);
         return IA64_FAULT;
@@ -287,11 +291,11 @@
 IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
 {
     u64 r3;
+    IA64_PSR  vpsr;
+
+    vpsr.val=vmx_vcpu_get_psr(vcpu);
+#ifdef  VMAL_NO_FAULT_CHECK
     ISR isr;
-    IA64_PSR  vpsr;
-
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-#ifdef  VMAL_NO_FAULT_CHECK
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
         set_privileged_operation_isr (vcpu, 0);
@@ -321,10 +325,10 @@
 
 IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
 {
+    IA64FAULT  ret1, ret2;
+
+#ifdef  VMAL_NO_FAULT_CHECK
     ISR isr;
-    IA64FAULT  ret1, ret2;
-
-#ifdef  VMAL_NO_FAULT_CHECK
     IA64_PSR  vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     if ( vpsr.cpl != 0) {
@@ -373,9 +377,9 @@
 IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
+#ifdef  CHECK_FAULT
     ISR visr;
     IA64_PSR vpsr;
-#ifdef  CHECK_FAULT
     if(check_target_register(vcpu, inst.M46.r1)){
         set_illegal_op_isr(vcpu);
         illegal_op(vcpu);
@@ -403,9 +407,11 @@
 IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
+#ifdef  CHECK_FAULT
     ISR visr;
     IA64_PSR vpsr;
- #ifdef  CHECK_FAULT
+#endif
+#ifdef  CHECK_FAULT
     if(check_target_register(vcpu, inst.M46.r1)){
         set_illegal_op_isr(vcpu);
         illegal_op(vcpu);
@@ -433,8 +439,8 @@
 IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
+#ifdef  CHECK_FAULT
     ISR visr;
-#ifdef  CHECK_FAULT
     if(check_target_register(vcpu, inst.M46.r1)){
         set_illegal_op_isr(vcpu);
         illegal_op(vcpu);
@@ -477,10 +483,10 @@
 IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
+#ifdef  CHECK_FAULT
     ISR visr;
     IA64_PSR vpsr;
     int fault=IA64_NO_FAULT;
-#ifdef  CHECK_FAULT
     visr.val=0;
     if(check_target_register(vcpu, inst.M46.r1)){
         set_illegal_op_isr(vcpu);
@@ -514,16 +520,16 @@
 
 IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
 {
-    UINT64 fault, itir, ifa, pte, slot;
+    UINT64 itir, ifa, pte, slot;
+    IA64_PSR  vpsr;
+    vpsr.val=vmx_vcpu_get_psr(vcpu);
+    if ( vpsr.ic ) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+#ifdef  VMAL_NO_FAULT_CHECK
     ISR isr;
-    IA64_PSR  vpsr;
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    if ( vpsr.ic ) {
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-#ifdef  VMAL_NO_FAULT_CHECK
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
         set_privileged_operation_isr (vcpu, 0);
@@ -571,8 +577,10 @@
 
 IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
 {
-    UINT64 fault, itir, ifa, pte, slot;
+    UINT64 itir, ifa, pte, slot;
+#ifdef  VMAL_NO_FAULT_CHECK
     ISR isr;
+#endif
     IA64_PSR  vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     if ( vpsr.ic ) {
@@ -628,19 +636,19 @@
 
 IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
 {
+    IA64_PSR  vpsr;
+    IA64FAULT  ret1;
+
+    vpsr.val=vmx_vcpu_get_psr(vcpu);
+    if ( vpsr.ic ) {
+        set_illegal_op_isr(vcpu);
+        illegal_op(vcpu);
+        return IA64_FAULT;
+    }
+
+#ifdef  VMAL_NO_FAULT_CHECK
     UINT64 fault;
     ISR isr;
-    IA64_PSR  vpsr;
-    IA64FAULT  ret1;
-
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    if ( vpsr.ic ) {
-        set_illegal_op_isr(vcpu);
-        illegal_op(vcpu);
-        return IA64_FAULT;
-    }
-
-#ifdef  VMAL_NO_FAULT_CHECK
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
         set_privileged_operation_isr (vcpu, 0);
@@ -1146,7 +1154,7 @@
 
 IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
 {
-    u64 r2,cr3;
+    u64 r2;
 #ifdef  CHECK_FAULT
     IA64_PSR  vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
@@ -1292,9 +1300,7 @@
 IA64_BUNDLE __vmx_get_domain_bundle(u64 iip)
 {
        IA64_BUNDLE bundle;
-
-       fetch_code( current,iip, &bundle.i64[0]);
-       fetch_code( current,iip+8, &bundle.i64[1]);
+       fetch_code( current, iip, &bundle.i64[0], &bundle.i64[1]);
        return bundle;
 }
 
@@ -1309,14 +1315,10 @@
 void
 vmx_emulate(VCPU *vcpu, REGS *regs)
 {
-    IA64_BUNDLE bundle;
-    int slot;
-    IA64_SLOT_TYPE slot_type;
     IA64FAULT status;
     INST64 inst;
     UINT64 iip, cause, opcode;
     iip = regs->cr_iip;
-    IA64_PSR vpsr;
     cause = VMX(vcpu,cause);
     opcode = VMX(vcpu,opcode);
 
@@ -1342,6 +1344,10 @@
 #endif
 #ifdef BYPASS_VMAL_OPCODE
     // make a local copy of the bundle containing the privop
+    IA64_BUNDLE bundle;
+    int slot;
+    IA64_SLOT_TYPE slot_type;
+    IA64_PSR vpsr;
     bundle = __vmx_get_domain_bundle(iip);
     slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
     if (!slot) inst.inst = bundle.slot0;
@@ -1483,11 +1489,11 @@
         status=vmx_emul_mov_from_cpuid(vcpu, inst);
         break;
     case EVENT_VMSW:
-        printf ("Unimplemented instruction %d\n", cause);
+        printf ("Unimplemented instruction %ld\n", cause);
        status=IA64_FAULT;
         break;
     default:
-        printf("unknown cause %d, iip: %lx, ipsr: %lx\n", 
cause,regs->cr_iip,regs->cr_ipsr);
+        printf("unknown cause %ld, iip: %lx, ipsr: %lx\n", 
cause,regs->cr_iip,regs->cr_ipsr);
         while(1);
        /* For unknown cause, let hardware to re-execute */
        status=IA64_RETRY;
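
Most of the vmx_virt.c hunks apply one idiom: declarations move inside the
#ifdef (CHECK_FAULT or VMAL_NO_FAULT_CHECK) that uses them, so builds without
those options compile without unused-variable warnings. Schematically, with a
made-up emulation stub rather than code from the patch:

    static IA64FAULT sketch_emul_op(VCPU *vcpu, INST64 inst)
    {
        u64 r3;
    #ifdef CHECK_FAULT
        ISR visr;                     /* lives and dies with the check */
        visr.val = 0;
        if (check_target_register(vcpu, inst.M46.r1)) {
            set_illegal_op_isr(vcpu);
            illegal_op(vcpu);
            return IA64_FAULT;
        }
    #endif
        return vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3);
    }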
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/vmx/vtlb.c  Thu Mar  2 10:00:49 2006
@@ -28,8 +28,11 @@
 #include <asm/gcc_intrin.h>
 #include <linux/interrupt.h>
 #include <asm/vmx_vcpu.h>
+#include <asm/vmmu.h>
+#include <asm/tlbflush.h>
 #define  MAX_CCH_LENGTH     40
 
+thash_data_t *__alloc_chain(thash_cb_t *, thash_data_t *);
 
 static void cch_mem_init(thash_cb_t *hcb)
 {
@@ -50,8 +53,10 @@
 
     if ( (p = hcb->cch_freelist) != NULL ) {
         hcb->cch_freelist = p->next;
-    }
-    return &(p->data);
+        return (thash_data_t *)p;
+    }else{
+        return NULL;
+    }
 }
 
 static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
@@ -65,36 +70,38 @@
 /*
  * Check to see if the address rid:va is translated by the TLB
  */
-static int __is_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl)
-{
-    u64  size1,sa1,ea1;
-    if ( tlb->rid != rid ||(!tlb->tc && tlb->cl != cl) )
-        return 0;
-    size1 = PSIZE(tlb->ps);
-    sa1 = tlb->vadr & ~(size1-1);   // mask the low address bits
-    ea1 = sa1 + size1;
-
-    if ( va >= sa1 && (va < ea1 || ea1 == 0) )
+
+static int __is_tr_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl)
+{
+    u64  size;
+    size = PSIZE(tlb->ps);
+    if(tlb->vadr&(size-1))
+        while(1);
+    if ((tlb->rid == rid) && ((va-tlb->vadr)<size))
         return 1;
     else
         return 0;
 }
 
 /*
- * Only for TLB format.
+ * Only for GUEST TR format.
  */
 static int
-__is_tlb_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva)
-{
-    uint64_t size1,size2,sa1,ea1,ea2;
-
-    if ( entry->invalid || entry->rid != rid || (!entry->tc && entry->cl != cl ) ) {
+__is_tr_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva)
+{
+    uint64_t size, sa1, ea1;
+
+//    if ( entry->invalid || entry->rid != rid || (entry->cl != cl ) ) {
+    if ( entry->invalid || entry->rid != rid ) {
         return 0;
     }
-    size1=PSIZE(entry->ps);
-    sa1 = entry->vadr & ~(size1-1); // mask the low address bits
-    ea1 = sa1 + size1;
-    if ( (sva >= ea1 && ea1 != 0) || (eva <= sa1 && eva != 0) ) 
+    size = PSIZE(entry->ps);
+    sa1 = entry->vadr;
+    ea1 = sa1 + size -1;
+    eva -= 1;
+    if(sa1&(size-1))
+        while(1);
+    if ( (sva>ea1) || (sa1>eva) )
         return 0;
     else
         return 1;
@@ -103,9 +110,11 @@
 
 static void __rem_tr (thash_cb_t *hcb, thash_data_t *tr)
 {
+/*
     if ( hcb->remove_notifier ) {
         (hcb->remove_notifier)(hcb,tr);
     }
+*/
     tr->invalid = 1;
 }
 
@@ -142,7 +151,7 @@
     else {
         tr = &DTR(hcb,idx);
     }
-    if ( !INVALID_TLB(tr) ) {
+    if ( !INVALID_TR(tr) ) {
         __rem_tr(hcb, tr);
     }
     __set_tr (tr, insert, idx);
@@ -151,6 +160,7 @@
 /*
  * remove TR entry.
  */
+/*
 static void rem_tr(thash_cb_t *hcb,CACHE_LINE_TYPE cl, int idx)
 {
     thash_data_t *tr;
@@ -161,17 +171,18 @@
     else {
         tr = &DTR(hcb,idx);
     }
-    if ( !INVALID_TLB(tr) ) {
+    if ( !INVALID_TR(tr) ) {
         __rem_tr(hcb, tr);
     }
 }
-
+ */
 /*
  * Delete an thash entry in collision chain.
  *  prev: the previous entry.
  *  rem: the removed entry.
  */
-static void __rem_chain(thash_cb_t *hcb/*, thash_data_t *prev*/, thash_data_t *rem)
+/*
+static void __rem_chain(thash_cb_t *hcb, thash_data_t *prev, thash_data_t *rem)
 {
     //prev->next = rem->next;
     if ( hcb->remove_notifier ) {
@@ -179,6 +190,7 @@
     }
     cch_free (hcb, rem);
 }
+ */
 
 /*
  * Delete an thash entry leading collision chain.
@@ -187,15 +199,16 @@
 {
     thash_data_t *next=hash->next;
 
-    if ( hcb->remove_notifier ) {
+/*    if ( hcb->remove_notifier ) {
         (hcb->remove_notifier)(hcb,hash);
-    }
+    } */
     if ( next != NULL ) {
+        next->len=hash->len-1;
         *hash = *next;
         cch_free (hcb, next);
     }
     else {
-        INVALIDATE_HASH(hcb, hash);
+        INVALIDATE_HASH_HEADER(hcb, hash);
     }
 }
 
@@ -215,8 +228,8 @@
         num = NDTRS;
     }
     for ( i=0; i<num; i++ ) {
-        if ( !INVALID_ENTRY(hcb,&tr[i]) &&
-            __is_translated(&tr[i], rid, va, cl) )
+        if ( !INVALID_TR(&tr[i]) &&
+            __is_tr_translated(&tr[i], rid, va, cl) )
             return &tr[i];
     }
     return NULL;
@@ -227,6 +240,7 @@
  * Find overlap VHPT entry within current collision chain
  * base on internal priv info.
  */
+/*
 static inline thash_data_t* _vhpt_next_overlap_in_chain(thash_cb_t *hcb)
 {
     thash_data_t    *cch;
@@ -240,26 +254,27 @@
     }
     return NULL;
 }
-
+*/
 /*
  * Find overlap TLB/VHPT entry within current collision chain
  * base on internal priv info.
  */
+/*
 static thash_data_t *_vtlb_next_overlap_in_chain(thash_cb_t *hcb)
 {
     thash_data_t    *cch;
     thash_internal_t *priv = &hcb->priv;
 
-    /* Find overlap TLB entry */
+    // Find overlap TLB entry
     for (cch=priv->cur_cch; cch; cch = cch->next) {
         if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr )  &&
-            __is_tlb_overlap(hcb, cch, priv->rid, priv->cl,
-                priv->_curva, priv->_eva) ) {
+            __is_translated( cch, priv->rid, priv->_curva, priv->cl)) {
             return cch;
         }
     }
     return NULL;
 }
+ */
 
 /*
  * Get the machine format of VHPT entry.
@@ -281,26 +296,190 @@
             thash_data_t *tlb, u64 va,
             thash_data_t *vhpt)
 {
-    u64 pages,mfn;
-    ia64_rr vrr;
-
+    u64 padr,pte;
+//    ia64_rr vrr;
     ASSERT ( hcb->ht == THASH_VHPT );
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
-    pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
-    mfn = (hcb->vs->get_mfn)(DOMID_SELF,tlb->ppn, pages);
-    if ( mfn == INVALID_MFN ) return 0;
-
+//    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
+    padr = tlb->ppn >>(tlb->ps-ARCH_PAGE_SHIFT)<<tlb->ps;
+    padr += va&((1UL<<tlb->ps)-1);
+    pte=lookup_domain_mpa(current->domain,padr);
+    if((pte>>56))
+        return 0;
     // TODO with machine discontinuous address space issue.
-    vhpt->etag = (hcb->vs->tag_func)( hcb->pta, tlb->vadr);
+    vhpt->etag = ia64_ttag(va);
     //vhpt->ti = 0;
     vhpt->itir = tlb->itir & ~ITIR_RV_MASK;
     vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
-    vhpt->ppn = mfn;
+    vhpt->ps = PAGE_SHIFT;
+    vhpt->ppn = (pte&((1UL<<IA64_MAX_PHYS_BITS)-(1UL<<PAGE_SHIFT)))>>ARCH_PAGE_SHIFT;
     vhpt->next = 0;
     return 1;
 }
 
-
+static void thash_remove_cch(thash_cb_t *hcb, thash_data_t *hash)
+{
+    thash_data_t *prev, *next;
+    prev = hash; next= hash->next;
+    while(next){
+       prev=next;
+       next=prev->next;
+       cch_free(hcb, prev);
+    }
+    hash->next = NULL;
+    hash->len = 0;
+}
+
+/*  vhpt only has entries with PAGE_SIZE page size */
+
+void thash_vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
+{
+    thash_data_t   vhpt_entry, *hash_table, *cch;
+//    ia64_rr vrr;
+
+    if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
+        return;
+    //panic("Can't convert to machine VHPT entry\n");
+    }
+
+    hash_table = (thash_data_t *)ia64_thash(va);
+    if( INVALID_VHPT(hash_table) ) {
+        *hash_table = vhpt_entry;
+        hash_table->next = 0;
+       return;
+    }
+
+    cch = hash_table;
+    while(cch){
+        if(cch->etag == vhpt_entry.etag){
+            if(cch->ppn == vhpt_entry.ppn)
+                return;
+            else
+                while(1);
+        }
+        cch = cch->next;
+    }
+    if(hash_table->len>=MAX_CCN_DEPTH){
+       thash_remove_cch(hcb, hash_table);
+       cch = cch_alloc(hcb);
+       *cch = *hash_table;
+        *hash_table = vhpt_entry;
+       hash_table->len = 1;
+        hash_table->next = cch;
+       return;
+    }
+       
+    // TODO: Add collision chain length limitation.
+     cch = __alloc_chain(hcb,entry);
+     if(cch == NULL){
+           *hash_table = vhpt_entry;
+            hash_table->next = 0;
+     }else{
+            *cch = *hash_table;
+            *hash_table = vhpt_entry;
+            hash_table->next = cch;
+           hash_table->len = cch->len + 1;
+           cch->len = 0;       
+//            if(hash_table->tag==hash_table->next->tag)
+//                while(1);
+
+    }
+    return /*hash_table*/;
+}
+
+/*
+ *   vhpt lookup
+ */
+
+thash_data_t * vhpt_lookup(u64 va)
+{
+    thash_data_t *hash;
+    u64 tag;
+    hash = (thash_data_t *)ia64_thash(va);
+    tag = ia64_ttag(va);
+    while(hash){
+       if(hash->etag == tag)
+               return hash;
+        hash=hash->next;
+    }
+    return NULL;
+}
+
+
+/*
+ *  purge software guest tlb
+ */
+
+static void vtlb_purge(thash_cb_t *hcb, u64 va, u64 ps)
+{
+    thash_data_t *hash_table, *prev, *next;
+    u64 start, end, size, tag, rid;
+    ia64_rr vrr;
+    vrr=vmx_vcpu_rr(current, va);
+    rid = vrr.rid;
+    size = PSIZE(ps);
+    start = va & (-size);
+    end = start + size;
+    while(start < end){
+        hash_table = vsa_thash(hcb->pta, start, vrr.rrval, &tag);
+//         tag = ia64_ttag(start);
+        if(!INVALID_TLB(hash_table)){
+       if(hash_table->etag == tag){
+            __rem_hash_head(hcb, hash_table);
+       }
+           else{
+           prev=hash_table;
+               next=prev->next;
+               while(next){
+                       if(next->etag == tag){
+                           prev->next=next->next;
+                           cch_free(hcb,next);
+                           hash_table->len--;
+                           break;
+                       }
+                       prev=next;
+                   next=next->next;
+           }
+       }
+        }
+           start += PAGE_SIZE;
+    }
+//    machine_tlb_purge(va, ps);
+}
+/*
+ *  purge VHPT and machine TLB
+ */
+
+static void vhpt_purge(thash_cb_t *hcb, u64 va, u64 ps)
+{
+    thash_data_t *hash_table, *prev, *next;
+    u64 start, end, size, tag;
+    size = PSIZE(ps);
+    start = va & (-size);
+    end = start + size;
+    while(start < end){
+       hash_table = (thash_data_t *)ia64_thash(start);
+           tag = ia64_ttag(start);
+       if(hash_table->etag == tag ){
+            __rem_hash_head(hcb, hash_table);
+       }
+           else{
+           prev=hash_table;
+               next=prev->next;
+               while(next){
+                       if(next->etag == tag){
+                           prev->next=next->next;
+                           cch_free(hcb,next);
+                           hash_table->len--;
+                           break;
+                       }
+                       prev=next;
+                   next=next->next;
+           }
+       }
+           start += PAGE_SIZE;
+    }
+    machine_tlb_purge(va, ps);
+}
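
vtlb_purge() and vhpt_purge() above duplicate the same unlink-by-tag walk over
a collision chain. A hedged refactoring sketch of that walk (the helper name
is mine; the body mirrors the loops in both functions):

    static void sketch_cch_remove(thash_cb_t *hcb, thash_data_t *head, u64 tag)
    {
        thash_data_t *prev, *next;

        if (head->etag == tag) {        /* hit in the hash line itself */
            __rem_hash_head(hcb, head);
            return;
        }
        for (prev = head, next = prev->next; next;
             prev = next, next = next->next) {
            if (next->etag == tag) {    /* hit in the chain: unlink and free */
                prev->next = next->next;
                cch_free(hcb, next);
                head->len--;
                break;
            }
        }
    }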
 /*
  * Insert an entry to hash table. 
  *    NOTES:
@@ -327,61 +506,96 @@
     entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
     entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
     rep_tr(hcb, entry, idx);
+//    thash_vhpt_insert(hcb->ts->vhpt, entry, va);
     return ;
 }
+
+
+/*
+ * Recycle all collisions chain in VTLB or VHPT.
+ *
+ */
+
+void thash_recycle_cch(thash_cb_t *hcb)
+{
+    thash_data_t    *hash_table;
+
+    hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
+    for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
+        thash_remove_cch(hcb,hash_table);
+    }
+}
+/*
 thash_data_t *vtlb_alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
 {
     thash_data_t *cch;
-    
+
     cch = cch_alloc(hcb);
     if(cch == NULL){
-        thash_purge_all(hcb);
+        thash_recycle_cch(hcb);
+        cch = cch_alloc(hcb);
     }
     return cch;
 }
- 
+*/
 
 thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
 {
     thash_data_t *cch;
-    
+
     cch = cch_alloc(hcb);
     if(cch == NULL){
         // recycle
-        if ( hcb->recycle_notifier ) {
-                hcb->recycle_notifier(hcb,(u64)entry);
-        }
-        thash_purge_all(hcb);
-//        cch = cch_alloc(hcb);
+//        if ( hcb->recycle_notifier ) {
+//                hcb->recycle_notifier(hcb,(u64)entry);
+//        }
+        thash_recycle_cch(hcb);
+        cch = cch_alloc(hcb);
     }
     return cch;
 }
- 
+
 /*
  * Insert an entry into hash TLB or VHPT.
  * NOTES:
  *  1: When inserting VHPT to thash, "va" is a must covered
  *  address by the inserted machine VHPT entry.
  *  2: The format of entry is always in TLB.
- *  3: The caller need to make sure the new entry will not overlap 
+ *  3: The caller needs to make sure the new entry will not overlap
  *     with any existed entry.
  */
 void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 {
     thash_data_t    *hash_table, *cch;
-    int flag;
+    /* int flag; */
     ia64_rr vrr;
-    u64 gppn;
-    u64 ppns, ppne;
-
-    hash_table = (hcb->hash_func)(hcb->pta, va);
-    if( INVALID_ENTRY(hcb, hash_table) ) {
+    /* u64 gppn, ppns, ppne; */
+    u64 tag;
+    vrr=vmx_vcpu_rr(current, va);
+    if (vrr.ps != entry->ps) {
+//        machine_tlb_insert(hcb->vcpu, entry);
+       panic("not preferred ps with va: 0x%lx\n", va);
+       return;
+    }
+    entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
+    entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
+    hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
+    entry->etag = tag;
+    if( INVALID_TLB(hash_table) ) {
         *hash_table = *entry;
         hash_table->next = 0;
     }
+    else if (hash_table->len>=MAX_CCN_DEPTH){
+        thash_remove_cch(hcb, hash_table);
+        cch = cch_alloc(hcb);
+        *cch = *hash_table;
+        *hash_table = *entry;
+        hash_table->len = 1;
+        hash_table->next = cch;
+    }
     else {
         // TODO: Add collision chain length limitation.
-        cch = vtlb_alloc_chain(hcb,entry);
+        cch = __alloc_chain(hcb,entry);
         if(cch == NULL){
             *hash_table = *entry;
             hash_table->next = 0;
@@ -389,22 +603,17 @@
             *cch = *hash_table;
             *hash_table = *entry;
             hash_table->next = cch;
-        }
-    }
+            hash_table->len = cch->len + 1;
+            cch->len = 0;
+        }
+    }
+#if 0
     if(hcb->vcpu->domain->domain_id==0){
        thash_insert(hcb->ts->vhpt, entry, va);
         return;
     }
-
-#if 1
-    vrr=vmx_vcpu_rr(current, va);
-    if (vrr.ps != entry->ps) {
-        machine_tlb_insert(hcb->vcpu, entry);
-       printk("not preferred ps with va: 0x%lx\n", va);
-       return;
-    }
-#endif 
-
+#endif
+/*
     flag = 1;
     gppn = (POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT;
     ppns = PAGEALIGN((entry->ppn<<12),entry->ps);
@@ -413,46 +622,18 @@
         flag = 0;
     if((__gpfn_is_mem(hcb->vcpu->domain, gppn)&&flag))
        thash_insert(hcb->ts->vhpt, entry, va);
+*/
     return ;
 }
 
-static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
-{
-    thash_data_t   vhpt_entry, *hash_table, *cch;
-    ia64_rr vrr;
-    if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
-        panic("Can't convert to machine VHPT entry\n");
-    }
-    hash_table = (hcb->hash_func)(hcb->pta, va);
-    if( INVALID_ENTRY(hcb, hash_table) ) {
-        *hash_table = vhpt_entry;
-        hash_table->next = 0;
-    }
-    else {
-        // TODO: Add collision chain length limitation.
-        cch = __alloc_chain(hcb,entry);
-        if(cch == NULL){
-            *hash_table = vhpt_entry;
-            hash_table->next = 0;
-        }else{
-            *cch = *hash_table;
-            *hash_table = vhpt_entry;
-            hash_table->next = cch;
-            if(hash_table->tag==hash_table->next->tag)
-                while(1);
-
-        }
-
-    }
-    return /*hash_table*/;
-}
-
+
+/*
 void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 {
     thash_data_t    *hash_table;
     ia64_rr vrr;
     
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
+    vrr = vmx_vcpu_rr(hcb->vcpu,entry->vadr);
     if ( entry->ps != vrr.ps && entry->tc ) {
         panic("Not support for multiple page size now\n");
     }
@@ -461,7 +642,8 @@
     (hcb->ins_hash)(hcb, entry, va);
     
 }
-
+*/
+/*
 static void rem_thash(thash_cb_t *hcb, thash_data_t *entry)
 {
     thash_data_t    *hash_table, *p, *q;
@@ -482,6 +664,7 @@
 //            if ( PURGABLE_ENTRY(hcb,q ) ) {
                 p->next = q->next;
                 __rem_chain(hcb, entry);
+                hash_table->len--;
 //            }
             return ;
         }
@@ -489,7 +672,8 @@
     }
     panic("Entry not existed or bad sequence\n");
 }
-
+*/
+/*
 static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
 {
     thash_data_t    *hash_table, *p, *q;
@@ -501,7 +685,7 @@
     }
     rem_thash(hcb, entry);
 }    
-
+*/
 int   cch_depth=0;
 /*
  * Purge the collision chain starting from cch.
@@ -509,6 +693,7 @@
  *     For those UN-Purgable entries(FM), this function will return
  * the head of left collision chain.
  */
+/*
 static thash_data_t *thash_rem_cch(thash_cb_t *hcb, thash_data_t *cch)
 {
     thash_data_t *next;
@@ -532,6 +717,7 @@
         return cch;
     }
 }
+ */
 
 /*
  * Purge one hash line (include the entry in hash table).
@@ -540,10 +726,11 @@
  *  hash: The head of collision chain (hash table)
  *
  */
+/*
 static void thash_rem_line(thash_cb_t *hcb, thash_data_t *hash)
 {
     if ( INVALID_ENTRY(hcb, hash) ) return;
-    
+
     if ( hash->next ) {
         cch_depth = 0;
         hash->next = thash_rem_cch(hcb, hash->next);
@@ -553,7 +740,7 @@
         __rem_hash_head(hcb, hash);
     }
 }
-
+ */
 
 /*
  * Find an overlap entry in hash table and its collision chain.
@@ -568,14 +755,18 @@
  *    NOTES:
  *
  */
-thash_data_t *thash_find_overlap(thash_cb_t *hcb, 
+
+/*
+thash_data_t *thash_find_overlap(thash_cb_t *hcb,
             thash_data_t *in, search_section_t s_sect)
 {
-    return (hcb->find_overlap)(hcb, in->vadr, 
+    return (hcb->find_overlap)(hcb, in->vadr,
             PSIZE(in->ps), in->rid, in->cl, s_sect);
 }
-
-static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb, 
+*/
+
+/*
+static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
         u64 va, u64 size, int rid, char cl, search_section_t s_sect)
 {
     thash_data_t    *hash_table;
@@ -586,9 +777,9 @@
     priv->_curva = va & ~(size-1);
     priv->_eva = priv->_curva + size;
     priv->rid = rid;
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
+    vrr = vmx_vcpu_rr(hcb->vcpu,va);
     priv->ps = vrr.ps;
-    hash_table = (hcb->hash_func)(hcb->pta, priv->_curva);
+    hash_table = vsa_thash(hcb->pta, priv->_curva, vrr.rrval, &tag);
     priv->s_sect = s_sect;
     priv->cl = cl;
     priv->_tr_idx = 0;
@@ -596,8 +787,10 @@
     priv->cur_cch = hash_table;
     return (hcb->next_overlap)(hcb);
 }
-
-static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb, 
+*/
+
+/*
+static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb,
         u64 va, u64 size, int rid, char cl, search_section_t s_sect)
 {
     thash_data_t    *hash_table;
@@ -608,17 +801,43 @@
     priv->_curva = va & ~(size-1);
     priv->_eva = priv->_curva + size;
     priv->rid = rid;
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
+    vrr = vmx_vcpu_rr(hcb->vcpu,va);
     priv->ps = vrr.ps;
-    hash_table = (hcb->hash_func)( hcb->pta, priv->_curva);
-    tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva);
+    hash_table = ia64_thash(priv->_curva);
+    tag = ia64_ttag(priv->_curva);
     priv->tag = tag;
     priv->hash_base = hash_table;
     priv->cur_cch = hash_table;
     return (hcb->next_overlap)(hcb);
 }
-
-
+*/
+
+
+thash_data_t *vtr_find_overlap(thash_cb_t *hcb, thash_data_t *data, char cl)
+{
+    thash_data_t    *tr;
+    int  i,num;
+    u64 end;
+
+    if (cl == ISIDE_TLB ) {
+        num = NITRS;
+        tr = &ITR(hcb,0);
+    }
+    else {
+        num = NDTRS;
+        tr = &DTR(hcb,0);
+    }
+    end=data->vadr + PSIZE(data->ps);
+    for (i=0; i<num; i++ ) {
+        if ( __is_tr_overlap(hcb, &tr[i], data->rid, cl, data->vadr, end )) {
+            return &tr[i];
+        }
+    }
+    return NULL;
+}
+
+
+/*
 static thash_data_t *vtr_find_next_overlap(thash_cb_t *hcb)
 {
     thash_data_t    *tr;
@@ -634,7 +853,7 @@
         tr = &DTR(hcb,0);
     }
     for (; priv->_tr_idx < num; priv->_tr_idx ++ ) {
-        if ( __is_tlb_overlap(hcb, &tr[priv->_tr_idx],
+        if ( __is_tr_overlap(hcb, &tr[priv->_tr_idx],
                 priv->rid, priv->cl,
                 priv->_curva, priv->_eva) ) {
             return &tr[priv->_tr_idx++];
@@ -642,17 +861,19 @@
     }
     return NULL;
 }
+*/
 
 /*
  * Similar with vtlb_next_overlap but find next entry.
  *    NOTES:
  *  Intermediate position information is stored in hcb->priv.
  */
+/*
 static thash_data_t *vtlb_next_overlap(thash_cb_t *hcb)
 {
     thash_data_t    *ovl;
     thash_internal_t *priv = &hcb->priv;
-    u64 addr,rr_psize;
+    u64 addr,rr_psize,tag;
     ia64_rr vrr;
 
     if ( priv->s_sect.tr ) {
@@ -661,7 +882,7 @@
         priv->s_sect.tr = 0;
     }
     if ( priv->s_sect.v == 0 ) return NULL;
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
+    vrr = vmx_vcpu_rr(hcb->vcpu,priv->_curva);
     rr_psize = PSIZE(vrr.ps);
 
     while ( priv->_curva < priv->_eva ) {
@@ -673,12 +894,15 @@
             }
         }
         priv->_curva += rr_psize;
-        priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva);
+        priv->hash_base = vsa_thash( hcb->pta, priv->_curva, vrr.rrval, &tag);
         priv->cur_cch = priv->hash_base;
     }
     return NULL;
 }
-
+ */
+
+
+/*
 static thash_data_t *vhpt_next_overlap(thash_cb_t *hcb)
 {
     thash_data_t    *ovl;
@@ -686,7 +910,7 @@
     u64 addr,rr_psize;
     ia64_rr vrr;
 
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
+    vrr = vmx_vcpu_rr(hcb->vcpu,priv->_curva);
     rr_psize = PSIZE(vrr.ps);
 
     while ( priv->_curva < priv->_eva ) {
@@ -698,13 +922,13 @@
             }
         }
         priv->_curva += rr_psize;
-        priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva);
-        priv->tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva);
+        priv->hash_base = ia64_thash(priv->_curva);
+        priv->tag = ia64_ttag(priv->_curva);
         priv->cur_cch = priv->hash_base;
     }
     return NULL;
 }
-
+*/
 
 /*
  * Find and purge overlap entries in hash table and its collision chain.
@@ -716,7 +940,7 @@
  *    NOTES:
  *
  */
-void thash_purge_entries(thash_cb_t *hcb, 
+void thash_purge_entries(thash_cb_t *hcb,
             thash_data_t *in, search_section_t p_sect)
 {
     return thash_purge_entries_ex(hcb, in->rid, in->vadr,
@@ -724,10 +948,11 @@
 }
 
 void thash_purge_entries_ex(thash_cb_t *hcb,
-            u64 rid, u64 va, u64 ps, 
-            search_section_t p_sect, 
+            u64 rid, u64 va, u64 ps,
+            search_section_t p_sect,
             CACHE_LINE_TYPE cl)
 {
+/*
     thash_data_t    *ovl;
 
     ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
@@ -735,19 +960,22 @@
         (hcb->rem_hash)(hcb, ovl);
         ovl = (hcb->next_overlap)(hcb);
     };
+ */
+    vtlb_purge(hcb, va, ps);
+    vhpt_purge(hcb->ts->vhpt, va, ps);
 }
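
/*
 * Illustrative sketch, not part of the changeset: the vtlb_purge/vhpt_purge
 * pair above conceptually walks the affected VA range one base page at a
 * time and drops matching entries from both structures.  The hooks are
 * stubs and 4K base pages are assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define PSIZE(ps) (1UL << (ps))

static void drop_vtlb_entry(uint64_t va)
{ printf("vtlb drop %#lx\n", (unsigned long) va); }

static void drop_vhpt_entry(uint64_t va)
{ printf("vhpt drop %#lx\n", (unsigned long) va); }

/* Purge every translation covering [va, va + 2^ps). */
static void purge_range(uint64_t va, uint64_t ps)
{
    uint64_t cur = va & ~(PSIZE(ps) - 1);   /* align down to mapping size */
    uint64_t end = cur + PSIZE(ps);

    while (cur < end) {
        drop_vtlb_entry(cur);
        drop_vhpt_entry(cur);
        cur += PSIZE(12);                   /* one 4K base page per step */
    }
}

int main(void) { purge_range(0x204000, 14); return 0; }
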
 
 /*
  * Purge overlapping TCs and then insert the new entry to emulate itc ops.
  *    Notes: only TC entries can be purged and inserted.
  */
-void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in)
-{
-    thash_data_t    *ovl;
+void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in, u64 va)
+{
+    /* thash_data_t    *ovl; */
     search_section_t sections;
 
 #ifdef   XEN_DEBUGGER
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,in->vadr);
+    vrr = vmx_vcpu_rr(hcb->vcpu,in->vadr);
        if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) {
                panic ("Oops, wrong call for purge_and_insert\n");
                return;
@@ -757,10 +985,14 @@
     in->ppn = PAGEALIGN(in->ppn, in->ps-12);
     sections.tr = 0;
     sections.tc = 1;
+/*
     ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps),
                                 in->rid, in->cl, sections);
     if(ovl)
         (hcb->rem_hash)(hcb, ovl);
+ */
+    vtlb_purge(hcb, va, in->ps);
+    vhpt_purge(hcb->ts->vhpt, va, in->ps);
 #ifdef   XEN_DEBUGGER
     ovl = (hcb->next_overlap)(hcb);
     if ( ovl ) {
@@ -768,7 +1000,9 @@
                return;
     }
 #endif
-    (hcb->ins_hash)(hcb, in, in->vadr);
+    if(in->ps!=PAGE_SHIFT)
+        vtlb_insert(hcb, in, va);
+    thash_vhpt_insert(hcb->ts->vhpt, in, va);
 }
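
/*
 * Illustrative sketch, not part of the changeset: the rewritten insert path
 * above sends base-page translations straight to the VHPT and records
 * larger mappings in the vTLB as well, so per-page VHPT entries can be
 * regenerated on demand.  Inserters are stubs; the entry layout is
 * simplified.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct tlb_entry { uint64_t ppn; uint64_t ps; };

static void vtlb_insert_stub(const struct tlb_entry *e, uint64_t va)
{ printf("vtlb insert va=%#lx ps=%lu\n", (unsigned long) va, (unsigned long) e->ps); }

static void vhpt_insert_stub(const struct tlb_entry *e, uint64_t va)
{ printf("vhpt insert va=%#lx ps=%lu\n", (unsigned long) va, (unsigned long) e->ps); }

static void purge_and_insert_sketch(struct tlb_entry *e, uint64_t va)
{
    /* Align the frame number to the mapping size, mirroring
     * PAGEALIGN(in->ppn, in->ps-12): ppn counts 4K frames, hence ps-12. */
    e->ppn &= ~((1UL << (e->ps - PAGE_SHIFT)) - 1);

    if (e->ps != PAGE_SHIFT)        /* large mapping: track in vTLB too */
        vtlb_insert_stub(e, va);
    vhpt_insert_stub(e, va);        /* every mapping lands in the VHPT */
}

int main(void)
{
    struct tlb_entry e = { 0x12345, 16 };   /* a 64K mapping */
    purge_and_insert_sketch(&e, 0x400000);
    return 0;
}
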
 /*
  * Purge one hash line (include the entry in hash table).
@@ -777,6 +1011,7 @@
  *  hash: The head of collision chain (hash table)
  *
  */
+/*
 static void thash_purge_line(thash_cb_t *hcb, thash_data_t *hash)
 {
     if ( INVALID_ENTRY(hcb, hash) ) return;
@@ -790,6 +1025,16 @@
     // Then hash table itself.
     INVALIDATE_HASH(hcb, hash);
 }
+*/
+
+
+
+
+
+
+
+
+
 /*
  * Purge all TCs or VHPT entries including those in Hash table.
  *
@@ -799,7 +1044,10 @@
 void thash_purge_all(thash_cb_t *hcb)
 {
     thash_data_t    *hash_table;
-    
+    /* thash_data_t    *entry; */
+    thash_cb_t  *vhpt;
+    /* u64 i, start, end; */
+
 #ifdef  VTLB_DEBUG
        extern u64  sanity_check;
     static u64 statistics_before_purge_all=0;
@@ -808,18 +1056,35 @@
         check_vtlb_sanity(hcb);
     }
 #endif
+    ASSERT ( hcb->ht == THASH_TLB );
 
     hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
     for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
-        thash_purge_line(hcb, hash_table);
-    }
-    if(hcb->ht== THASH_TLB) {
-        hcb = hcb->ts->vhpt;
-        hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
-        for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
-            thash_purge_line(hcb, hash_table);
-        }
-    }
+        INVALIDATE_TLB_HEADER(hash_table);
+    }
+    cch_mem_init (hcb);
+
+    vhpt = hcb->ts->vhpt;
+    hash_table = (thash_data_t*)((u64)vhpt->hash + vhpt->hash_sz);
+    for (--hash_table;(u64)hash_table >= (u64)vhpt->hash;hash_table--) {
+        INVALIDATE_VHPT_HEADER(hash_table);
+    }
+    cch_mem_init (vhpt);
+    
+/*
+    entry = &hcb->ts->itr[0];
+    for(i=0; i< (NITRS+NDTRS); i++){
+        if(!INVALID_TLB(entry)){
+            start=entry->vadr & (-PSIZE(entry->ps));
+            end = start + PSIZE(entry->ps);
+            while(start<end){
+                thash_vhpt_insert(vhpt, entry, start);
+                start += PAGE_SIZE;
+            }
+        }
+        entry++;
+    }
+*/
     local_flush_tlb_all();
 }
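
/*
 * Illustrative sketch, not part of the changeset: thash_purge_all now
 * clears both hash arrays header-by-header and resets the collision-chain
 * allocator, instead of purging line-by-line.  The original walks
 * backwards with raw pointers cast to u64; an index-based equivalent with
 * a simplified head layout:
 */
#include <stddef.h>
#include <stdint.h>

struct hash_head { uint64_t tag; struct hash_head *next; };
#define INVALID_TAG (~0UL)

static void invalidate_all_headers(struct hash_head *base, size_t nents)
{
    for (size_t i = nents; i-- > 0; ) {
        base[i].tag  = INVALID_TAG;   /* mark the head itself invalid */
        base[i].next = NULL;          /* detach its collision chain */
    }
    /* a real implementation would also reset the chain-memory pool,
     * which is what cch_mem_init() does in the hunk above */
}

int main(void)
{
    struct hash_head table[8] = {{0}};
    invalidate_all_headers(table, 8);
    return table[0].tag == INVALID_TAG ? 0 : 1;
}
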
 
@@ -845,20 +1110,21 @@
     u64     tag;
     ia64_rr vrr;
    
-    ASSERT ( hcb->ht == THASH_VTLB );
+    ASSERT ( hcb->ht == THASH_TLB );
     
     cch = __vtr_lookup(hcb, rid, va, cl);
     if ( cch ) return cch;
 
-    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
-    hash_table = (hcb->hash_func)( hcb->pta, va);
+    vrr = vmx_vcpu_rr(hcb->vcpu,va);
+    hash_table = vsa_thash( hcb->pta, va, vrr.rrval, &tag);
 
     if ( INVALID_ENTRY(hcb, hash_table ) )
         return NULL;
 
         
     for (cch=hash_table; cch; cch = cch->next) {
-        if ( __is_translated(cch, rid, va, cl) )
+//        if ( __is_translated(cch, rid, va, cl) )
+        if(cch->etag == tag)
             return cch;
     }
     return NULL;
@@ -871,6 +1137,7 @@
  *          1: failure
  *          0: success
  */
+/*
 int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock)
 {
        thash_data_t    *ovl;
@@ -900,6 +1167,7 @@
        }
        return 1;
 }
+*/
 
 /*
  * Notifier invoked when a TLB entry is deleted from the hash table and its collision chain.
@@ -911,15 +1179,17 @@
  *  2: The entry format is always the TLB format.
  *
  */
-void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
-{
-    thash_cb_t  *vhpt;
-    search_section_t    s_sect;
+//void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
+//{
+//    vhpt_purge(hcb->ts->vhpt,entry->vadr,entry->ps);
+//    thash_cb_t  *vhpt;
     
-    s_sect.v = 0;
-    thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
-    machine_tlb_purge(entry->vadr, entry->ps);
-}
+//    search_section_t    s_sect;
+    
+//    s_sect.v = 0;
+//    thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
+//    machine_tlb_purge(entry->vadr, entry->ps);
+//}
 
 /*
  * Initialize internal control data before service.
@@ -930,34 +1200,33 @@
 
     cch_mem_init (hcb);
     hcb->magic = THASH_CB_MAGIC;
-    hcb->pta.val = hcb->hash;
+    hcb->pta.val = (unsigned long)hcb->hash;
     hcb->pta.vf = 1;
     hcb->pta.ve = 1;
     hcb->pta.size = sz;
-    hcb->get_rr_fn = vmmu_get_rr;
+//    hcb->get_rr_fn = vmmu_get_rr;
     ASSERT ( hcb->hash_sz % sizeof(thash_data_t) == 0 );
     if ( hcb->ht == THASH_TLB ) {
-        hcb->remove_notifier =  tlb_remove_notifier;
-        hcb->find_overlap = vtlb_find_overlap;
-        hcb->next_overlap = vtlb_next_overlap;
-        hcb->rem_hash = rem_vtlb;
-        hcb->ins_hash = vtlb_insert;
+//        hcb->remove_notifier =  NULL;        //tlb_remove_notifier;
+//        hcb->find_overlap = vtlb_find_overlap;
+//        hcb->next_overlap = vtlb_next_overlap;
+//        hcb->rem_hash = rem_vtlb;
+//        hcb->ins_hash = vtlb_insert;
         __init_tr(hcb);
     }
     else {
-        hcb->remove_notifier =  NULL;
-        hcb->find_overlap = vhpt_find_overlap;
-        hcb->next_overlap = vhpt_next_overlap;
-        hcb->rem_hash = rem_thash;
-        hcb->ins_hash = vhpt_insert;
+//        hcb->remove_notifier =  NULL;
+//        hcb->find_overlap = vhpt_find_overlap;
+//        hcb->next_overlap = vhpt_next_overlap;
+//        hcb->rem_hash = rem_thash;
+//        hcb->ins_hash = thash_vhpt_insert;
     }
     hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
-    
+
     for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
-        INVALIDATE_HASH(hcb,hash_table);
-    }
-}
-#define VTLB_DEBUG
+        INVALIDATE_HASH_HEADER(hcb,hash_table);
+    }
+}
 #ifdef  VTLB_DEBUG
 static  u64 cch_length_statistics[MAX_CCH_LENGTH+1];
 u64  sanity_check=0;
@@ -967,7 +1236,7 @@
     thash_data_t    *ovl;
     search_section_t s_sect;
     u64     num=0;
-    
+
     s_sect.v = 0;
     for (cch=hash; cch; cch=cch->next) {
         ovl = thash_find_overlap(vhpt, cch, s_sect);
@@ -997,7 +1266,7 @@
     search_section_t s_sect;
     thash_cb_t *vhpt = vtlb->ts->vhpt;
     u64   invalid_ratio;
-    
+ 
     if ( sanity_check == 0 ) return;
     sanity_check --;
     s_sect.v = 0;
@@ -1010,7 +1279,7 @@
 //    vb2 = vb1 + vtlb->hash_sz;
     hash_num = vhpt->hash_sz / sizeof(thash_data_t);
 //    printf("vb2=%lp, size=%lx hash_num=%lx\n", vb2, vhpt->hash_sz, hash_num);
-    printf("vtlb=%lp, hash=%lp size=0x%lx; vhpt=%lp, hash=%lp size=0x%lx\n", 
+    printf("vtlb=%p, hash=%p size=0x%lx; vhpt=%p, hash=%p size=0x%lx\n", 
                 vtlb, vtlb->hash,vtlb->hash_sz,
                 vhpt, vhpt->hash, vhpt->hash_sz);
     //memcpy(vb1, vtlb->hash, vtlb->hash_sz);
@@ -1018,9 +1287,9 @@
     for ( i=0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
        cch_length_statistics[i] = 0;
     }
-    
+
     local_irq_save(psr);
-    
+
     hash = vhpt->hash;
     for (i=0; i < hash_num; i++) {
         if ( !INVALID_ENTRY(vhpt, hash) ) {
@@ -1043,7 +1312,7 @@
         }
         hash ++;
     }
-    printf("Done vtlb entry check, hash=%lp\n", hash);
+    printf("Done vtlb entry check, hash=%p\n", hash);
     printf("check_ok_num = 0x%lx check_invalid=0x%lx\n", check_ok_num,check_invalid);
     invalid_ratio = 1000*check_invalid / hash_num;
     printf("%02ld.%01ld%% entries are invalid\n", 
@@ -1072,7 +1341,7 @@
         if ( !INVALID_ENTRY(vhpt, hash) ) {
             for ( cch= hash; cch; cch=cch->next) {
                 if ( !cch->checked ) {
-                    printf ("!!!Hash=%lp cch=%lp not within vtlb\n", hash, cch);
+                    printf ("!!!Hash=%p cch=%p not within vtlb\n", hash, cch);
                     check_fail_num ++;
                 }
                 else {
@@ -1103,18 +1372,18 @@
     static u64  dump_vtlb=0;
     thash_data_t  *hash, *cch, *tr;
     u64     hash_num,i;
-    
+
     if ( dump_vtlb == 0 ) return;
     dump_vtlb --;
     hash_num = vtlb->hash_sz / sizeof(thash_data_t);
     hash = vtlb->hash;
-    
+
     printf("Dump vTC\n");
     for ( i = 0; i < hash_num; i++ ) {
         if ( !INVALID_ENTRY(vtlb, hash) ) {
-            printf("VTLB at hash=%lp\n", hash);
+            printf("VTLB at hash=%p\n", hash);
             for (cch=hash; cch; cch=cch->next) {
-                printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
+                printf("Entry %p va=%lx ps=%d rid=%d\n",
                     cch, cch->vadr, cch->ps, cch->rid);
             }
         }
@@ -1123,13 +1392,13 @@
     printf("Dump vDTR\n");
     for (i=0; i<NDTRS; i++) {
         tr = &DTR(vtlb,i);
-        printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
+        printf("Entry %p va=%lx ps=%d rid=%d\n",
                     tr, tr->vadr, tr->ps, tr->rid);
     }
     printf("Dump vITR\n");
     for (i=0; i<NITRS; i++) {
         tr = &ITR(vtlb,i);
-        printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
+        printf("Entry %p va=%lx ps=%d rid=%d\n",
                     tr, tr->vadr, tr->ps, tr->rid);
     }
     printf("End of vTLB dump\n");
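
A recurring fix in the hunks above replaces the bogus %lp conversion with %p
and matches integer widths to their specifiers. A small standalone
illustration of the corrected usage (all values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t va = 0xe000000000004000ULL;
    int ps = 14, rid = 7;
    void *entry = &va;

    /* "%lp" is not a C conversion specifier, so its behaviour is
     * undefined.  Pointers take %p with a void * argument, 64-bit
     * values take %lx on LP64, and plain ints take %d -- the
     * substitutions made throughout the dump/check functions above. */
    printf("Entry %p va=%lx ps=%d rid=%d\n",
           entry, (unsigned long) va, ps, rid);
    return 0;
}
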
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/acpi.c
--- a/xen/arch/ia64/xen/acpi.c  Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/acpi.c  Thu Mar  2 10:00:49 2006
@@ -178,7 +178,7 @@
 
        if (lapic->address) {
                iounmap((void *) ipi_base_addr);
-               ipi_base_addr = (unsigned long) ioremap(lapic->address, 0);
+               ipi_base_addr = (void __iomem *) ioremap(lapic->address, 0);
        }
        return 0;
 }
@@ -265,7 +265,9 @@
        acpi_table_entry_header *header, const unsigned long end)
 {
        struct acpi_table_plat_int_src *plintsrc;
+#if 0
        int vector;
+#endif
 
        plintsrc = (struct acpi_table_plat_int_src *) header;
 
@@ -369,9 +371,9 @@
        /* Get base address of IPI Message Block */
 
        if (acpi_madt->lapic_address)
-               ipi_base_addr = (unsigned long) ioremap(acpi_madt->lapic_address, 0);
-
-       printk(KERN_INFO PREFIX "Local APIC address 0x%lx\n", ipi_base_addr);
+               ipi_base_addr = (void __iomem *) ioremap(acpi_madt->lapic_address, 0);
+
+       printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
 
        acpi_madt_oem_check(acpi_madt->header.oem_id,
                acpi_madt->header.oem_table_id);
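
The acpi.c change keeps ipi_base_addr typed as void __iomem * across the
ioremap()/iounmap() round trip rather than laundering it through unsigned
long. A compilable sketch of the pattern, with __iomem stubbed out and
hypothetical helper names (the real annotation only has teeth under sparse):

#include <stdio.h>
#include <stdint.h>

#define __iomem   /* sparse-only annotation; empty for a plain compile */

/* Stand-ins for ioremap()/iounmap(), purely illustrative. */
static void __iomem *ioremap_stub(unsigned long phys, unsigned long size)
{ (void) size; return (void __iomem *)(uintptr_t) phys; }

static void iounmap_stub(void __iomem *addr) { (void) addr; }

int main(void)
{
    /* Keeping the base as a pointer means no cast at the iounmap() call
     * and %p prints it naturally, as in the patched printk. */
    void __iomem *ipi_base = ioremap_stub(0xfee00000UL, 0);
    printf("Local APIC address %p\n", ipi_base);
    iounmap_stub(ipi_base);
    return 0;
}
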
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/dom0_ops.c      Thu Mar  2 10:00:49 2006
@@ -17,6 +17,7 @@
 #include <xen/trace.h>
 #include <xen/console.h>
 #include <public/sched_ctl.h>
+#include <asm/vmx.h>
 
 long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
 {
@@ -143,7 +144,7 @@
             n += j;
         }
 
-        free_xenheap_page((unsigned long)l_arr);
+        free_xenheap_page((void *) l_arr);
 
         put_domain(d);
     }
@@ -160,7 +161,6 @@
         unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
         unsigned long mfn;
         unsigned long *buffer = op->u.getmemlist.buffer;
-        struct page *page;
 
         ret = -EINVAL;
         if ( d != NULL )
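
The dom0_ops.c fix hands free_xenheap_page() the void * it expects instead
of an integer, and drops an unused local. The underlying habit, shown with
libc stand-ins: keep a heap object typed as a pointer from allocation to
free so neither end needs a cast.

#include <stdlib.h>
#include <string.h>

int main(void)
{
    /* stand-in for alloc_xenheap_page(): returns void *, so store void * */
    void *l_arr = malloc(4096);
    if (l_arr == NULL)
        return 1;
    memset(l_arr, 0, 4096);
    /* ... fill with mfns and copy out to the caller ... */
    free(l_arr);    /* stand-in for free_xenheap_page(); no cast needed */
    return 0;
}
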
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/dom_fw.c
--- a/xen/arch/ia64/xen/dom_fw.c        Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/dom_fw.c        Thu Mar  2 10:00:49 2006
@@ -39,7 +39,8 @@
                while(1);
        }
        if (imva - imva_fw_base > PAGE_SIZE) {
-               printf("dom_pa: bad offset! imva=%p, imva_fw_base=%p (spinning...)\n",imva,imva_fw_base);
+               printf("dom_pa: bad offset! imva=0x%lx, imva_fw_base=0x%lx (spinning...)\n",
+                       imva, imva_fw_base);
                while(1);
        }
        return dom_fw_base_mpa + (imva - imva_fw_base);
@@ -48,31 +49,29 @@
 // builds a hypercall bundle at domain physical address
 void dom_efi_hypercall_patch(struct domain *d, unsigned long paddr, unsigned long hypercall)
 {
-       unsigned long imva;
+       unsigned long *imva;
 
        if (d == dom0) paddr += dom0_start;
-       imva = domain_mpa_to_imva(d,paddr);
-       build_hypercall_bundle(imva,d->arch.breakimm,hypercall,1);
+       imva = (unsigned long *) domain_mpa_to_imva(d, paddr);
+       build_hypercall_bundle(imva, d->arch.breakimm, hypercall, 1);
 }
 
 
 // builds a hypercall bundle at domain physical address
 static void dom_fw_hypercall_patch(struct domain *d, unsigned long paddr, unsigned long hypercall,unsigned long ret)
 {
-       unsigned long imva;
-
-       imva = domain_mpa_to_imva(d,paddr);
-       build_hypercall_bundle(imva,d->arch.breakimm,hypercall,ret);
+       unsigned long *imva;
+
+       imva = (unsigned long *) domain_mpa_to_imva(d, paddr);
+       build_hypercall_bundle(imva, d->arch.breakimm, hypercall, ret);
 }
 
 static void dom_fw_pal_hypercall_patch(struct domain *d, unsigned long paddr)
 {
        unsigned long *imva;
 
-       imva = (unsigned long *)domain_mpa_to_imva(d,paddr);
-
-       build_pal_hypercall_bundles (imva, d->arch.breakimm,
-                                     FW_HYPERCALL_PAL_CALL);
+       imva = (unsigned long *) domain_mpa_to_imva(d, paddr);
+       build_pal_hypercall_bundles(imva, d->arch.breakimm, FW_HYPERCALL_PAL_CALL);
 }
 
 
@@ -85,15 +84,13 @@
 
        dom_fw_base_mpa = 0;
        if (d == dom0) dom_fw_base_mpa += dom0_start;
-       imva_fw_base = domain_mpa_to_imva(d,dom_fw_base_mpa);
-       bp = dom_fw_init(d,args,arglen,imva_fw_base,PAGE_SIZE);
-       return dom_pa((unsigned long)bp);
+       imva_fw_base = domain_mpa_to_imva(d, dom_fw_base_mpa);
+       bp = dom_fw_init(d, args, arglen, (char *) imva_fw_base, PAGE_SIZE);
+       return dom_pa((unsigned long) bp);
 }
 
 
 /* the following heavily leveraged from linux/arch/ia64/hp/sim/fw-emu.c */
-
-#define MB     (1024*1024UL)
 
 #define NUM_EFI_SYS_TABLES 6
 # define NUM_MEM_DESCS 5
@@ -256,7 +253,8 @@
                        if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
                            (in4 > 1) ||
                            (in2 > 8) || (in2 & (in2-1)))
-                               printf("*** SAL_PCI_CONF_WRITE?!?(adr=%p,typ=%p,sz=%p,val=%p)\n",in1,in4,in2,in3);
+                               printf("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
+                                       in1,in4,in2,in3);
                        // note that args are in a different order!!
                        status = ia64_sal_pci_config_write(in1,in4,in2,in3);
                }
@@ -296,7 +294,7 @@
        long status = -1;
 
        if (running_on_sim) return pal_emulator_static(index);
-       printk("xen_pal_emulator: index=%d\n",index);
+       printk("xen_pal_emulator: index=%lu\n", index);
        // pal code must be mapped by a TR when pal is called, however
        // calls are rare enough that we will map it lazily rather than
        // at every context switch
@@ -312,10 +310,16 @@
                status = ia64_pal_proc_get_features(&r9,&r10,&r11);
                break;
            case PAL_BUS_GET_FEATURES:
-               status = ia64_pal_bus_get_features(&r9,&r10,&r11);
+               status = ia64_pal_bus_get_features(
+                               (pal_bus_features_u_t *) &r9,
+                               (pal_bus_features_u_t *) &r10,
+                               (pal_bus_features_u_t *) &r11);
                break;
            case PAL_FREQ_RATIOS:
-               status = ia64_pal_freq_ratios(&r9,&r10,&r11);
+               status = ia64_pal_freq_ratios(
+                               (struct pal_freq_ratio *) &r9,
+                               (struct pal_freq_ratio *) &r10,
+                               (struct pal_freq_ratio *) &r11);
                break;
            case PAL_PTCE_INFO:
                {
@@ -326,7 +330,9 @@
                }
                break;
            case PAL_VERSION:
-               status = ia64_pal_version(&r9,&r10);
+               status = ia64_pal_version(
+                               (pal_version_u_t *) &r9,
+                               (pal_version_u_t *) &r10);
                break;
            case PAL_VM_PAGE_SIZE:
                status = ia64_pal_vm_page_size(&r9,&r10);
@@ -341,13 +347,21 @@
                // FIXME: what should xen return for these, figure out later
                // For now, linux does the right thing if pal call fails
                // In particular, rid_size must be set properly!
-               //status = ia64_pal_vm_summary(&r9,&r10);
+               //status = ia64_pal_vm_summary(
+               //              (pal_vm_info_1_u_t *) &r9,
+               //              (pal_vm_info_2_u_t *) &r10);
                break;
            case PAL_RSE_INFO:
-               status = ia64_pal_rse_info(&r9,&r10);
+               status = ia64_pal_rse_info(
+                               &r9,
+                               (pal_hints_u_t *) &r10);
                break;
            case PAL_VM_INFO:
-               status = ia64_pal_vm_info(in1,in2,&r9,&r10);
+               status = ia64_pal_vm_info(
+                               in1,
+                               in2,
+                               (pal_tc_info_u_t *) &r9,
+                               &r10);
                break;
            case PAL_REGISTER_INFO:
                status = ia64_pal_register_info(in1,&r9,&r10);
@@ -360,11 +374,12 @@
            case PAL_PERF_MON_INFO:
                {
                        unsigned long pm_buffer[16];
-                       int i;
-                       status = ia64_pal_perf_mon_info(pm_buffer,&r9);
+                       status = ia64_pal_perf_mon_info(
+                                       pm_buffer,
+                                       (pal_perf_mon_info_u_t *) &r9);
                        if (status != 0) {
                                while(1)
-                               printk("PAL_PERF_MON_INFO fails ret=%d\n",status);
+                               printk("PAL_PERF_MON_INFO fails ret=%ld\n", status);
                                break;
                        }
                        if (copy_to_user((void __user *)in1,pm_buffer,128)) {
@@ -409,7 +424,7 @@
                            domain_shutdown (current->domain, 0);
                    break;
            default:
-               printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %d!!!!\n",
+               printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %lu!!!!\n",
                                index);
                break;
        }
@@ -434,7 +449,7 @@
 
 /* Provide only one LP to guest */
 static int 
-acpi_update_lsapic (acpi_table_entry_header *header)
+acpi_update_lsapic (acpi_table_entry_header *header, const unsigned long end)
 {
        struct acpi_table_lsapic *lsapic;
 
@@ -529,8 +544,8 @@
        strcpy(xsdt->asl_compiler_id, "XEN");
        xsdt->asl_compiler_revision = (XEN_VERSION<<16)|(XEN_SUBVERSION);
 
-       xsdt->table_offset_entry[0] = dom_pa(fadt);
-       tables->madt_ptr = dom_pa(madt);
+       xsdt->table_offset_entry[0] = dom_pa((unsigned long) fadt);
+       tables->madt_ptr = dom_pa((unsigned long) madt);
 
        xsdt->checksum = generate_acpi_checksum(xsdt, xsdt->length);
 
@@ -547,8 +562,8 @@
        facs->version = 1;
        facs->length = sizeof(struct facs_descriptor_rev2);
 
-       fadt->xfirmware_ctrl = dom_pa(facs);
-       fadt->Xdsdt = dom_pa(dsdt);
+       fadt->xfirmware_ctrl = dom_pa((unsigned long) facs);
+       fadt->Xdsdt = dom_pa((unsigned long) dsdt);
 
        /*
         * All of the below FADT entries are filled it to prevent warnings
@@ -558,15 +573,15 @@
        fadt->pm1_evt_len = 4;
        fadt->xpm1a_evt_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
        fadt->xpm1a_evt_blk.register_bit_width = 8;
-       fadt->xpm1a_evt_blk.address = dom_pa(&tables->pm1a_evt_blk);
+       fadt->xpm1a_evt_blk.address = dom_pa((unsigned long) &tables->pm1a_evt_blk);
        fadt->pm1_cnt_len = 1;
        fadt->xpm1a_cnt_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
        fadt->xpm1a_cnt_blk.register_bit_width = 8;
-       fadt->xpm1a_cnt_blk.address = dom_pa(&tables->pm1a_cnt_blk);
+       fadt->xpm1a_cnt_blk.address = dom_pa((unsigned long) &tables->pm1a_cnt_blk);
        fadt->pm_tm_len = 4;
        fadt->xpm_tmr_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
        fadt->xpm_tmr_blk.register_bit_width = 8;
-       fadt->xpm_tmr_blk.address = dom_pa(&tables->pm_tmr_blk);
+       fadt->xpm_tmr_blk.address = dom_pa((unsigned long) &tables->pm_tmr_blk);
 
        fadt->checksum = generate_acpi_checksum(fadt, fadt->length);
 
@@ -575,7 +590,7 @@
        strcpy(rsdp->oem_id, "XEN");
        rsdp->revision = 2; /* ACPI 2.0 includes XSDT */
        rsdp->length = sizeof(struct acpi20_table_rsdp);
-       rsdp->xsdt_address = dom_pa(xsdt);
+       rsdp->xsdt_address = dom_pa((unsigned long) xsdt);
 
        rsdp->checksum = generate_acpi_checksum(rsdp,
                                                ACPI_RSDP_CHECKSUM_LENGTH);
@@ -640,7 +655,7 @@
        unsigned long maxmem = (d->max_pages - d->arch.sys_pgnr) * PAGE_SIZE;
        const unsigned long start_mpaddr = ((d==dom0)?dom0_start:0);
 
-#      define MAKE_MD(typ, attr, start, end, abs)      \       
+#      define MAKE_MD(typ, attr, start, end, abs)      \
        do {                                            \
                md = efi_memmap + i++;                  \
                md->type = typ;                         \
@@ -669,7 +684,7 @@
        sal_ed      = (void *) cp; cp += sizeof(*sal_ed);
        efi_memmap  = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap);
        bp          = (void *) cp; cp += sizeof(*bp);
-       pfn        = (void *) cp; cp += NFUNCPTRS * 2 * sizeof(pfn);
+       pfn         = (void *) cp; cp += NFUNCPTRS * 2 * sizeof(pfn);
        cmd_line    = (void *) cp;
 
        if (args) {
@@ -690,19 +705,19 @@
        cp += sizeof(FW_VENDOR) + (8-((unsigned long)cp & 7)); // round to 64-bit boundary
 
        memcpy(fw_vendor,FW_VENDOR,sizeof(FW_VENDOR));
-       efi_systab->fw_vendor = dom_pa(fw_vendor);
+       efi_systab->fw_vendor = dom_pa((unsigned long) fw_vendor);
        
        efi_systab->fw_revision = 1;
-       efi_systab->runtime = (void *) dom_pa(efi_runtime);
+       efi_systab->runtime = (void *) dom_pa((unsigned long) efi_runtime);
        efi_systab->nr_tables = NUM_EFI_SYS_TABLES;
-       efi_systab->tables = dom_pa(efi_tables);
+       efi_systab->tables = dom_pa((unsigned long) efi_tables);
 
        efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
        efi_runtime->hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
        efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr);
 #define EFI_HYPERCALL_PATCH(tgt,call) do { \
     dom_efi_hypercall_patch(d,FW_HYPERCALL_##call##_PADDR,FW_HYPERCALL_##call); \
-    tgt = dom_pa(pfn); \
+    tgt = dom_pa((unsigned long) pfn); \
     *pfn++ = FW_HYPERCALL_##call##_PADDR + start_mpaddr; \
     *pfn++ = 0; \
     } while (0)
@@ -719,7 +734,7 @@
        EFI_HYPERCALL_PATCH(efi_runtime->reset_system,EFI_RESET_SYSTEM);
 
        efi_tables[0].guid = SAL_SYSTEM_TABLE_GUID;
-       efi_tables[0].table = dom_pa(sal_systab);
+       efi_tables[0].table = dom_pa((unsigned long) sal_systab);
        for (i = 1; i < NUM_EFI_SYS_TABLES; i++) {
                efi_tables[i].guid = NULL_GUID;
                efi_tables[i].table = 0;
@@ -730,7 +745,7 @@
                if (efi.mps) {
                        efi_tables[i].guid = MPS_TABLE_GUID;
                        efi_tables[i].table = __pa(efi.mps);
-                       printf(" MPS=%0xlx",efi_tables[i].table);
+                       printf(" MPS=0x%lx",efi_tables[i].table);
                        i++;
                }
 
@@ -739,25 +754,25 @@
                if (efi.acpi20) {
                        efi_tables[i].guid = ACPI_20_TABLE_GUID;
                        efi_tables[i].table = __pa(efi.acpi20);
-                       printf(" ACPI 2.0=%0xlx",efi_tables[i].table);
+                       printf(" ACPI 2.0=0x%lx",efi_tables[i].table);
                        i++;
                }
                if (efi.acpi) {
                        efi_tables[i].guid = ACPI_TABLE_GUID;
                        efi_tables[i].table = __pa(efi.acpi);
-                       printf(" ACPI=%0xlx",efi_tables[i].table);
+                       printf(" ACPI=0x%lx",efi_tables[i].table);
                        i++;
                }
                if (efi.smbios) {
                        efi_tables[i].guid = SMBIOS_TABLE_GUID;
                        efi_tables[i].table = __pa(efi.smbios);
-                       printf(" SMBIOS=%0xlx",efi_tables[i].table);
+                       printf(" SMBIOS=0x%lx",efi_tables[i].table);
                        i++;
                }
                if (efi.hcdp) {
                        efi_tables[i].guid = HCDP_TABLE_GUID;
                        efi_tables[i].table = __pa(efi.hcdp);
-                       printf(" HCDP=%0xlx",efi_tables[i].table);
+                       printf(" HCDP=0x%lx",efi_tables[i].table);
                        i++;
                }
                printf("\n");
@@ -773,8 +788,8 @@
                        dom_fw_fake_acpi(acpi_tables);
 
                        efi_tables[i].guid = ACPI_20_TABLE_GUID;
-                       efi_tables[i].table = dom_pa(acpi_tables);
-                       printf(" ACPI 2.0=%0xlx",efi_tables[i].table);
+                       efi_tables[i].table = dom_pa((unsigned long) acpi_tables);
+                       printf(" ACPI 2.0=0x%lx",efi_tables[i].table);
                        i++;
                }
        }
@@ -850,12 +865,12 @@
                MAKE_MD(EFI_RESERVED_TYPE,0,0,0,0);
        }
 
-       bp->efi_systab = dom_pa(fw_mem);
-       bp->efi_memmap = dom_pa(efi_memmap);
+       bp->efi_systab = dom_pa((unsigned long) fw_mem);
+       bp->efi_memmap = dom_pa((unsigned long) efi_memmap);
        bp->efi_memmap_size = NUM_MEM_DESCS*sizeof(efi_memory_desc_t);
        bp->efi_memdesc_size = sizeof(efi_memory_desc_t);
        bp->efi_memdesc_version = 1;
-       bp->command_line = dom_pa(cmd_line);
+       bp->command_line = dom_pa((unsigned long) cmd_line);
        bp->console_info.num_cols = 80;
        bp->console_info.num_rows = 25;
        bp->console_info.orig_x = 0;
@@ -870,7 +885,7 @@
                bp->initrd_start = d->arch.initrd_start;
                bp->initrd_size  = d->arch.initrd_len;
        }
-       printf(" initrd start %0xlx", bp->initrd_start);
-       printf(" initrd size %0xlx", bp->initrd_size);
+       printf(" initrd start 0x%lx", bp->initrd_start);
+       printf(" initrd size 0x%lx\n", bp->initrd_size);
        return bp;
 }
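
dom_fw.c now adds an explicit (unsigned long) cast at every dom_pa() call
site. A hypothetical way to centralize the conversion, sketched with a
stubbed dom_pa() and a fictitious offset (this helper is not in the patch):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for dom_pa(): translates a Xen-internal mapped address to a
 * domain physical address.  The constant offset is fictitious. */
static unsigned long dom_pa_stub(unsigned long imva)
{ return imva - 0xf000000000000000UL; }

/* Hypothetical wrapper: accept any object pointer once, instead of
 * repeating the (unsigned long) cast at every call site. */
static unsigned long dom_pa_ptr(const void *p)
{ return dom_pa_stub((unsigned long)(uintptr_t) p); }

int main(void)
{
    static char fw_vendor[16] = "Xen/ia64";
    printf("fw_vendor at dom pa 0x%lx\n", dom_pa_ptr(fw_vendor));
    return 0;
}
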
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/domain.c        Thu Mar  2 10:00:49 2006
@@ -45,7 +45,9 @@
 #include <asm/vmx.h>
 #include <asm/vmx_vcpu.h>
 #include <asm/vmx_vpd.h>
+#include <asm/vmx_phy_mode.h>
 #include <asm/pal.h>
+#include <asm/vhpt.h>
 #include <public/hvm/ioreq.h>
 
 #define CONFIG_DOMAIN0_CONTIGUOUS
@@ -63,8 +65,16 @@
 extern int readelfimage_base_and_size(char *, unsigned long,
                      unsigned long *, unsigned long *, unsigned long *);
 
-unsigned long map_domain_page0(struct domain *);
 extern unsigned long dom_fw_setup(struct domain *, char *, int);
+/* FIXME: where should these declarations go? */
+extern void domain_pend_keyboard_interrupt(int);
+extern long platform_is_hp_ski(void);
+extern unsigned long allocate_metaphysical_rr(void);
+extern int allocate_rid_range(struct domain *, unsigned long);
+extern void sync_split_caches(void);
+extern void init_all_rr(struct vcpu *);
+extern void serial_input_init(void);
+
 static void init_switch_stack(struct vcpu *v);
 
 /* this belongs in include/asm, but there doesn't seem to be a suitable place */
@@ -251,9 +261,12 @@
        return 0;
 
 fail_nomem:
-       free_xenheap_page(d->shared_info);
-       xfree(d->arch.mm);
-       pgd_free(d->arch.mm->pgd);
+       if (d->arch.mm->pgd != NULL)
+           pgd_free(d->arch.mm->pgd);
+       if (d->arch.mm != NULL)
+           xfree(d->arch.mm);
+       if (d->shared_info != NULL)
+           free_xenheap_page(d->shared_info);
        return -ENOMEM;
 }
 
@@ -272,8 +285,6 @@
 {
        struct pt_regs *regs = vcpu_regs (v);
        struct domain *d = v->domain;
-       int i, rc, ret;
-       unsigned long progress = 0;
 
        printf("arch_set_info_guest\n");
        if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
@@ -301,7 +312,7 @@
        v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
        if ( c->vcpu.privregs && copy_from_user(v->arch.privregs,
                           c->vcpu.privregs, sizeof(mapped_regs_t))) {
-           printk("Bad ctxt address in arch_set_info_guest: 0x%lx\n", c->vcpu.privregs);
+           printk("Bad ctxt address in arch_set_info_guest: %p\n", c->vcpu.privregs);
            return -EFAULT;
        }
 
@@ -328,9 +339,7 @@
 {
        struct domain *d = v->domain;
        struct pt_regs *regs;
-       struct ia64_boot_param *bp;
        extern char saved_command_line[];
-
 
 #ifdef CONFIG_DOMAIN0_CONTIGUOUS
        if (d == dom0) start_pc += dom0_start;
@@ -378,18 +387,19 @@
        }
 }
 
-static struct page * map_new_domain0_page(unsigned long mpaddr)
+static struct page * assign_new_domain0_page(unsigned long mpaddr)
 {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
-               printk("map_new_domain0_page: bad domain0 mpaddr %p!\n",mpaddr);
-printk("map_new_domain0_page: start=%p,end=%p!\n",dom0_start,dom0_start+dom0_size);
+               printk("assign_new_domain0_page: bad domain0 mpaddr 0x%lx!\n",mpaddr);
+               printk("assign_new_domain0_page: start=0x%lx,end=0x%lx!\n",
+                       dom0_start, dom0_start+dom0_size);
                while(1);
        }
        return mfn_to_page((mpaddr >> PAGE_SHIFT));
 }
 
 /* allocate new page for domain and map it to the specified metaphysical addr */
-struct page * map_new_domain_page(struct domain *d, unsigned long mpaddr)
+struct page * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
 {
        struct mm_struct *mm = d->arch.mm;
        struct page *p = (struct page *)0;
@@ -397,10 +407,9 @@
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
-extern unsigned long vhpt_paddr, vhpt_pend;
 
        if (!mm->pgd) {
-               printk("map_new_domain_page: domain pgd must exist!\n");
+               printk("assign_new_domain_page: domain pgd must exist!\n");
                return(p);
        }
        pgd = pgd_offset(mm,mpaddr);
@@ -419,7 +428,7 @@
        pte = pte_offset_map(pmd, mpaddr);
        if (pte_none(*pte)) {
 #ifdef CONFIG_DOMAIN0_CONTIGUOUS
-               if (d == dom0) p = map_new_domain0_page(mpaddr);
+               if (d == dom0) p = assign_new_domain0_page(mpaddr);
                else
 #endif
                {
@@ -428,21 +437,23 @@
                        if (p) memset(__va(page_to_maddr(p)),0,PAGE_SIZE);
                }
                if (unlikely(!p)) {
-printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
+                       printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
                        return(p);
                }
-if (unlikely(page_to_maddr(p) > vhpt_paddr && page_to_maddr(p) < vhpt_pend)) {
-  printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
-}
+               if (unlikely(page_to_maddr(p) > __get_cpu_var(vhpt_paddr)
+                            && page_to_maddr(p) < __get_cpu_var(vhpt_pend))) {
+                       printf("assign_new_domain_page: reassigned vhpt page %lx!!\n",
+                               page_to_maddr(p));
+               }
                set_pte(pte, pfn_pte(page_to_maddr(p) >> PAGE_SHIFT,
                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
        }
-       else printk("map_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+       else printk("assign_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
        return p;
 }
 
 /* map a physical address to the specified metaphysical addr */
-void map_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
+void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
 {
        struct mm_struct *mm = d->arch.mm;
        pgd_t *pgd;
@@ -451,7 +462,7 @@
        pte_t *pte;
 
        if (!mm->pgd) {
-               printk("map_domain_page: domain pgd must exist!\n");
+               printk("assign_domain_page: domain pgd must exist!\n");
                return;
        }
        pgd = pgd_offset(mm,mpaddr);
@@ -472,11 +483,14 @@
                set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
        }
-       else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+       else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+    if((physaddr>>PAGE_SHIFT)<max_page){
+        *(mpt_table + (physaddr>>PAGE_SHIFT))=(mpaddr>>PAGE_SHIFT);
+    }
 }
 #if 0
 /* map a physical address with specified I/O flag */
-void map_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
+void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
 {
        struct mm_struct *mm = d->arch.mm;
        pgd_t *pgd;
@@ -486,7 +500,7 @@
        pte_t io_pte;
 
        if (!mm->pgd) {
-               printk("map_domain_page: domain pgd must exist!\n");
+               printk("assign_domain_page: domain pgd must exist!\n");
                return;
        }
        ASSERT(flags & GPFN_IO_MASK);
@@ -509,7 +523,7 @@
                pte_val(io_pte) = flags;
                set_pte(pte, io_pte);
        }
-       else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
+       else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
 }
 #endif
 void mpafoo(unsigned long mpaddr)
@@ -530,8 +544,8 @@
 #ifdef CONFIG_DOMAIN0_CONTIGUOUS
        if (d == dom0) {
                if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
-                       //printk("lookup_domain_mpa: bad dom0 mpaddr %p!\n",mpaddr);
-//printk("lookup_domain_mpa: start=%p,end=%p!\n",dom0_start,dom0_start+dom0_size);
+                       //printk("lookup_domain_mpa: bad dom0 mpaddr 0x%lx!\n",mpaddr);
+                       //printk("lookup_domain_mpa: start=0x%lx,end=0x%lx!\n",dom0_start,dom0_start+dom0_size);
                        mpafoo(mpaddr);
                }
                pte_t pteval = pfn_pte(mpaddr >> PAGE_SHIFT,
@@ -557,10 +571,10 @@
        }
        /* if lookup fails and mpaddr is "legal", "create" the page */
        if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
-               if (map_new_domain_page(d,mpaddr)) goto tryagain;
-       }
-       printk("lookup_domain_mpa: bad mpa %p (> %p\n",
-               mpaddr,d->max_pages<<PAGE_SHIFT);
+               if (assign_new_domain_page(d,mpaddr)) goto tryagain;
+       }
+       printk("lookup_domain_mpa: bad mpa 0x%lx (> 0x%lx)\n",
+               mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
        mpafoo(mpaddr);
        return 0;
 }
@@ -573,7 +587,7 @@
        unsigned long imva;
 
        pte &= _PAGE_PPN_MASK;
-       imva = __va(pte);
+       imva = (unsigned long) __va(pte);
        imva |= mpaddr & ~PAGE_MASK;
        return(imva);
 }
@@ -602,13 +616,13 @@
 {
        int remain;
 
-       if (IS_XEN_ADDRESS(dom0,src)) {
+       if (IS_XEN_ADDRESS(dom0,(unsigned long) src)) {
                memcpy(dst,src,size);
        }
        else {
                printf("About to call __copy_from_user(%p,%p,%d)\n",
                        dst,src,size);
-               while (remain = __copy_from_user(dst,src,size)) {
+               while ((remain = __copy_from_user(dst,src,size)) != 0) {
                        printf("incomplete user copy, %d remain of %d\n",
                                remain,size);
                        dst += size - remain; src += size - remain;
@@ -619,16 +633,15 @@
 
 void loaddomainelfimage(struct domain *d, unsigned long image_start)
 {
-       char *elfbase = image_start;
+       char *elfbase = (char *) image_start;
        //Elf_Ehdr *ehdr = (Elf_Ehdr *)image_start;
        Elf_Ehdr ehdr;
        Elf_Phdr phdr;
-       int h, filesz, memsz, paddr;
+       int h, filesz, memsz;
        unsigned long elfaddr, dom_mpaddr, dom_imva;
        struct page *p;
-       unsigned long pteval;
   
-       copy_memory(&ehdr,image_start,sizeof(Elf_Ehdr));
+       copy_memory(&ehdr, (void *) image_start, sizeof(Elf_Ehdr));
        for ( h = 0; h < ehdr.e_phnum; h++ ) {
                copy_memory(&phdr,elfbase + ehdr.e_phoff + (h*ehdr.e_phentsize),
                sizeof(Elf_Phdr));
@@ -637,7 +650,7 @@
                continue;
        }
        filesz = phdr.p_filesz; memsz = phdr.p_memsz;
-       elfaddr = elfbase + phdr.p_offset;
+       elfaddr = (unsigned long) elfbase + phdr.p_offset;
        dom_mpaddr = phdr.p_paddr;
 //printf("p_offset: %x, size=%x\n",elfaddr,filesz);
 #ifdef CONFIG_DOMAIN0_CONTIGUOUS
@@ -646,37 +659,31 @@
                        printf("Domain0 doesn't fit in allocated space!\n");
                        while(1);
                }
-               dom_imva = __va(dom_mpaddr + dom0_start);
-               copy_memory(dom_imva,elfaddr,filesz);
-               if (memsz > filesz) memset(dom_imva+filesz,0,memsz-filesz);
+               dom_imva = (unsigned long) __va(dom_mpaddr + dom0_start);
+               copy_memory((void *) dom_imva, (void *) elfaddr, filesz);
+               if (memsz > filesz) memset((void *) dom_imva+filesz, 0, memsz-filesz);
 //FIXME: This test for code seems to find a lot more than objdump -x does
                if (phdr.p_flags & PF_X) privify_memory(dom_imva,filesz);
        }
        else
 #endif
        while (memsz > 0) {
-#ifdef DOMU_AUTO_RESTART
-               pteval = lookup_domain_mpa(d,dom_mpaddr);
-               if (pteval) dom_imva = __va(pteval & _PFN_MASK);
-               else { printf("loaddomainelfimage: BAD!\n"); while(1); }
-#else
-               p = map_new_domain_page(d,dom_mpaddr);
+               p = assign_new_domain_page(d,dom_mpaddr);
                if (unlikely(!p)) BUG();
-               dom_imva = __va(page_to_maddr(p));
-#endif
+               dom_imva = (unsigned long) __va(page_to_maddr(p));
                if (filesz > 0) {
                        if (filesz >= PAGE_SIZE)
-                               copy_memory(dom_imva,elfaddr,PAGE_SIZE);
+                               copy_memory((void *) dom_imva, (void *) elfaddr, PAGE_SIZE);
                        else { // copy partial page, zero the rest of page
-                               copy_memory(dom_imva,elfaddr,filesz);
-                               memset(dom_imva+filesz,0,PAGE_SIZE-filesz);
+                               copy_memory((void *) dom_imva, (void *) elfaddr, filesz);
+                               memset((void *) dom_imva+filesz, 0, PAGE_SIZE-filesz);
                        }
 //FIXME: This test for code seems to find a lot more than objdump -x does
                        if (phdr.p_flags & PF_X)
                                privify_memory(dom_imva,PAGE_SIZE);
                }
                else if (memsz > 0) // always zero out entire page
-                       memset(dom_imva,0,PAGE_SIZE);
+                       memset((void *) dom_imva, 0, PAGE_SIZE);
                memsz -= PAGE_SIZE; filesz -= PAGE_SIZE;
                elfaddr += PAGE_SIZE; dom_mpaddr += PAGE_SIZE;
        }
@@ -691,33 +698,33 @@
        copy_memory(&ehdr,elfbase,sizeof(Elf_Ehdr));
 
        if ( !elf_sanity_check(&ehdr) ) {
-           printk("ELF sanity check failed.\n");
-           return -EINVAL;
+               printk("ELF sanity check failed.\n");
+               return -EINVAL;
        }
 
        if ( (ehdr.e_phoff + (ehdr.e_phnum * ehdr.e_phentsize)) > elfsize )
        {
-           printk("ELF program headers extend beyond end of image.\n");
-           return -EINVAL;
+               printk("ELF program headers extend beyond end of image.\n");
+               return -EINVAL;
        }
 
        if ( (ehdr.e_shoff + (ehdr.e_shnum * ehdr.e_shentsize)) > elfsize )
        {
-           printk("ELF section headers extend beyond end of image.\n");
-           return -EINVAL;
+               printk("ELF section headers extend beyond end of image.\n");
+               return -EINVAL;
        }
 
 #if 0
        /* Find the section-header strings table. */
        if ( ehdr.e_shstrndx == SHN_UNDEF )
        {
-           printk("ELF image has no section-header strings table (shstrtab).\n");
-           return -EINVAL;
+               printk("ELF image has no section-header strings table (shstrtab).\n");
+               return -EINVAL;
        }
 #endif
 
        *entry = ehdr.e_entry;
-printf("parsedomainelfimage: entry point = %p\n",*entry);
+       printf("parsedomainelfimage: entry point = 0x%lx\n", *entry);
 
        return 0;
 }
@@ -729,22 +736,21 @@
        if (platform_is_hp_ski()) {
        dom0_size = 128*1024*1024; //FIXME: Should be configurable
        }
-       printf("alloc_dom0: starting (initializing %d MB...)\n",dom0_size/(1024*1024));
+       printf("alloc_dom0: starting (initializing %lu MB...)\n",dom0_size/(1024*1024));
  
-     /* FIXME: The first trunk (say 256M) should always be assigned to
-      * Dom0, since Dom0's physical == machine address for DMA purpose.
-      * Some old version linux, like 2.4, assumes physical memory existing
-      * in 2nd 64M space.
-      */
-     dom0_start = alloc_boot_pages(
-         dom0_size >> PAGE_SHIFT, dom0_align >> PAGE_SHIFT);
-     dom0_start <<= PAGE_SHIFT;
+       /* FIXME: The first trunk (say 256M) should always be assigned to
+        * Dom0, since Dom0's physical == machine address for DMA purpose.
+        * Some old version linux, like 2.4, assumes physical memory existing
+        * in 2nd 64M space.
+        */
+       dom0_start = alloc_boot_pages(dom0_size >> PAGE_SHIFT, dom0_align >> PAGE_SHIFT);
+       dom0_start <<= PAGE_SHIFT;
        if (!dom0_start) {
-       printf("construct_dom0: can't allocate contiguous memory size=%p\n",
+       printf("alloc_dom0: can't allocate contiguous memory size=%lu\n",
                dom0_size);
        while(1);
        }
-       printf("alloc_dom0: dom0_start=%p\n",dom0_start);
+       printf("alloc_dom0: dom0_start=0x%lx\n", dom0_start);
 #else
        dom0_start = 0;
 #endif
@@ -772,13 +778,8 @@
                       unsigned long initrd_start, unsigned long initrd_len,
                       char *cmdline)
 {
-       char *dst;
        int i, rc;
-       unsigned long pfn, mfn;
-       unsigned long nr_pt_pages;
-       unsigned long count;
        unsigned long alloc_start, alloc_end;
-       struct page_info *page = NULL;
        start_info_t *si;
        struct vcpu *v = d->vcpu[0];
 
@@ -788,16 +789,23 @@
        unsigned long pkern_entry;
        unsigned long pkern_end;
        unsigned long pinitrd_start = 0;
-       unsigned long ret, progress = 0;
+       unsigned long pstart_info;
+#if 0
+       char *dst;
+       unsigned long nr_pt_pages;
+       unsigned long count;
+#endif
+#ifdef VALIDATE_VT
+       unsigned long mfn;
+       struct page_info *page = NULL;
+#endif
 
 //printf("construct_dom0: starting\n");
 
-#ifndef CLONE_DOMAIN0
        /* Sanity! */
        BUG_ON(d != dom0);
        BUG_ON(d->vcpu[0] == NULL);
        BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
-#endif
 
        memset(&dsi, 0, sizeof(struct domain_setup_info));
 
@@ -846,20 +854,26 @@
              pinitrd_start=(dom0_start+dom0_size) -
                           (PAGE_ALIGN(initrd_len) + 4*1024*1024);
 
-             memcpy(__va(pinitrd_start),initrd_start,initrd_len);
+             memcpy(__va(pinitrd_start), (void *) initrd_start, initrd_len);
+             pstart_info = PAGE_ALIGN(pinitrd_start + initrd_len);
+        } else {
+             pstart_info = PAGE_ALIGN(pkern_end);
         }
 
        printk("METAPHYSICAL MEMORY ARRANGEMENT:\n"
               " Kernel image:  %lx->%lx\n"
               " Entry address: %lx\n"
-               " Init. ramdisk: %lx len %lx\n",
-               pkern_start, pkern_end, pkern_entry, pinitrd_start, initrd_len);
+              " Init. ramdisk: %lx len %lx\n"
+              " Start info.:   %lx->%lx\n",
+              pkern_start, pkern_end, pkern_entry, pinitrd_start, initrd_len,
+              pstart_info, pstart_info + PAGE_SIZE);
 
        if ( (pkern_end - pkern_start) > (d->max_pages * PAGE_SIZE) )
        {
            printk("Initial guest OS requires too much space\n"
                   "(%luMB is greater than %luMB limit)\n",
-                  (pkern_end-pkern_start)>>20, (d->max_pages<<PAGE_SHIFT)>>20);
+                  (pkern_end-pkern_start)>>20,
+                  (unsigned long) (d->max_pages<<PAGE_SHIFT)>>20);
            return -ENOMEM;
        }
 
@@ -908,9 +922,9 @@
 
 
        /* Set up start info area. */
-       si = (start_info_t *)alloc_xenheap_page();
+       d->shared_info->arch.start_info_pfn = pstart_info >> PAGE_SHIFT;
+       si = __va(pstart_info);
        memset(si, 0, PAGE_SIZE);
-       d->shared_info->arch.start_info_pfn = __pa(si) >> PAGE_SHIFT;
        sprintf(si->magic, "xen-%i.%i-ia64", XEN_VERSION, XEN_SUBVERSION);
        si->nr_pages     = d->tot_pages;
 
@@ -962,79 +976,10 @@
        sync_split_caches();
 
        // FIXME: Hack for keyboard input
-#ifdef CLONE_DOMAIN0
-if (d == dom0)
-#endif
        serial_input_init();
-       if (d == dom0) {
-               VCPU(v, delivery_mask[0]) = -1L;
-               VCPU(v, delivery_mask[1]) = -1L;
-               VCPU(v, delivery_mask[2]) = -1L;
-               VCPU(v, delivery_mask[3]) = -1L;
-       }
-       else __set_bit(0x30, VCPU(v, delivery_mask));
 
        return 0;
 }
-
-// FIXME: When dom0 can construct domains, this goes away (or is rewritten)
-int construct_domU(struct domain *d,
-                  unsigned long image_start, unsigned long image_len,
-                  unsigned long initrd_start, unsigned long initrd_len,
-                  char *cmdline)
-{
-       int i, rc;
-       struct vcpu *v = d->vcpu[0];
-       unsigned long pkern_entry;
-
-#ifndef DOMU_AUTO_RESTART
-       BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
-#endif
-
-       printk("*** LOADING DOMAIN %d ***\n",d->domain_id);
-
-       d->max_pages = dom0_size/PAGE_SIZE;     // FIXME: use dom0 size
-       // FIXME: use domain0 command line
-       rc = parsedomainelfimage(image_start, image_len, &pkern_entry);
-       printk("parsedomainelfimage returns %d\n",rc);
-       if ( rc != 0 ) return rc;
-
-       /* Mask all upcalls... */
-       for ( i = 0; i < MAX_VIRT_CPUS; i++ )
-               d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
-
-       /* Copy the OS image. */
-       printk("calling loaddomainelfimage(%p,%p)\n",d,image_start);
-       loaddomainelfimage(d,image_start);
-       printk("loaddomainelfimage returns\n");
-
-       set_bit(_VCPUF_initialised, &v->vcpu_flags);
-
-       printk("calling new_thread, entry=%p\n",pkern_entry);
-#ifdef DOMU_AUTO_RESTART
-       v->domain->arch.image_start = image_start;
-       v->domain->arch.image_len = image_len;
-       v->domain->arch.entry = pkern_entry;
-#endif
-       new_thread(v, pkern_entry, 0, 0);
-       printk("new_thread returns\n");
-       sync_split_caches();
-       __set_bit(0x30, VCPU(v, delivery_mask));
-
-       return 0;
-}
-
-#ifdef DOMU_AUTO_RESTART
-void reconstruct_domU(struct vcpu *v)
-{
-       /* re-copy the OS image to reset data values to original */
-       printk("reconstruct_domU: restarting domain %d...\n",
-               v->domain->domain_id);
-       loaddomainelfimage(v->domain,v->domain->arch.image_start);
-       new_thread(v, v->domain->arch.entry, 0, 0);
-       sync_split_caches();
-}
-#endif
 
 void machine_restart(char * __unused)
 {
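
The arch_do_createdomain change above makes the fail_nomem path safe when
only some allocations succeeded: each resource is checked before release,
in reverse order of allocation. The same shape in a self-contained sketch
with libc allocators and made-up structures:

#include <stdlib.h>

struct mm  { void *pgd; };
struct dom { void *shared_info; struct mm *mm; };

/* Unwind in reverse order of allocation, checking each resource before
 * freeing it, so a failure at any step frees exactly what was set up. */
static int setup(struct dom *d)
{
    d->shared_info = calloc(1, 4096);
    if (!d->shared_info)
        goto fail;
    d->mm = calloc(1, sizeof(*d->mm));
    if (!d->mm)
        goto fail;
    d->mm->pgd = calloc(1, 4096);
    if (!d->mm->pgd)
        goto fail;
    return 0;

fail:
    if (d->mm && d->mm->pgd)
        free(d->mm->pgd);
    if (d->mm)
        free(d->mm);
    if (d->shared_info)
        free(d->shared_info);
    return -1;
}

int main(void) { struct dom d = {0}; return setup(&d) ? 1 : 0; }
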
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c     Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/hypercall.c     Thu Mar  2 10:00:49 2006
@@ -9,24 +9,66 @@
 #include <xen/config.h>
 #include <xen/sched.h>
 #include <xen/hypercall.h>
+#include <xen/multicall.h>
 
 #include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */
 #include <asm/sal.h>   /* FOR struct ia64_sal_retval */
 
 #include <asm/vcpu.h>
 #include <asm/dom_fw.h>
+#include <public/dom0_ops.h>
+#include <public/event_channel.h>
 #include <public/memory.h>
 #include <public/sched.h>
 
 extern unsigned long translate_domain_mpaddr(unsigned long);
+/* FIXME: where should these declarations go? */
+extern int dump_privop_counts_to_user(char *, int);
+extern int zero_privop_counts_to_user(char *, int);
 
 unsigned long idle_when_pending = 0;
 unsigned long pal_halt_light_count = 0;
+
+hypercall_t ia64_hypercall_table[] =
+       {
+       (hypercall_t)do_ni_hypercall,           /* do_set_trap_table */     /*  0 */
+       (hypercall_t)do_ni_hypercall,           /* do_mmu_update */
+       (hypercall_t)do_ni_hypercall,           /* do_set_gdt */
+       (hypercall_t)do_ni_hypercall,           /* do_stack_switch */
+       (hypercall_t)do_ni_hypercall,           /* do_set_callbacks */
+       (hypercall_t)do_ni_hypercall,           /* do_fpu_taskswitch */     /*  5 */
+       (hypercall_t)do_ni_hypercall,           /* do_sched_op */
+       (hypercall_t)do_dom0_op,
+       (hypercall_t)do_ni_hypercall,           /* do_set_debugreg */
+       (hypercall_t)do_ni_hypercall,           /* do_get_debugreg */
+       (hypercall_t)do_ni_hypercall,           /* do_update_descriptor */  /* 10 */
+       (hypercall_t)do_ni_hypercall,           /* do_ni_hypercall */
+       (hypercall_t)do_memory_op,
+       (hypercall_t)do_multicall,
+       (hypercall_t)do_ni_hypercall,           /* do_update_va_mapping */
+       (hypercall_t)do_ni_hypercall,           /* do_set_timer_op */       /* 15 */
+       (hypercall_t)do_event_channel_op,
+       (hypercall_t)do_xen_version,
+       (hypercall_t)do_console_io,
+       (hypercall_t)do_ni_hypercall,           /* do_physdev_op */
+       (hypercall_t)do_grant_table_op,                                     /* 20 */
+       (hypercall_t)do_ni_hypercall,           /* do_vm_assist */
+       (hypercall_t)do_ni_hypercall,           /* do_update_va_mapping_otherdomain */
+       (hypercall_t)do_ni_hypercall,           /* (x86 only) */
+       (hypercall_t)do_ni_hypercall,           /* do_vcpu_op */
+       (hypercall_t)do_ni_hypercall,           /* (x86_64 only) */         /* 25 */
+       (hypercall_t)do_ni_hypercall,           /* do_mmuext_op */
+       (hypercall_t)do_ni_hypercall,           /* do_acm_op */
+       (hypercall_t)do_ni_hypercall,           /* do_nmi_op */
+       (hypercall_t)do_ni_hypercall,           /*  */
+       (hypercall_t)do_ni_hypercall,           /*  */                      /* 30 */
+       (hypercall_t)do_ni_hypercall            /*  */
+       };
 
 int
 ia64_hypercall (struct pt_regs *regs)
 {
-       struct vcpu *v = (struct domain *) current;
+       struct vcpu *v = current;
        struct sal_ret_values x;
        unsigned long *tv, *tc;
        int pi;
@@ -94,23 +136,16 @@
                        printf("(by dom0)\n ");
                        (*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
                }
-#ifdef DOMU_AUTO_RESTART
-               else {
-                       reconstruct_domU(current);
-                       return 0;  // don't increment ip!
-               }
-#else  
                printf("(not supported for non-0 domain)\n");
                regs->r8 = EFI_UNSUPPORTED;
-#endif
                break;
            case FW_HYPERCALL_EFI_GET_TIME:
-               tv = vcpu_get_gr(v,32);
-               tc = vcpu_get_gr(v,33);
+               tv = (unsigned long *) vcpu_get_gr(v,32);
+               tc = (unsigned long *) vcpu_get_gr(v,33);
                //printf("efi_get_time(%p,%p) called...",tv,tc);
-               tv = __va(translate_domain_mpaddr(tv));
-               if (tc) tc = __va(translate_domain_mpaddr(tc));
-               regs->r8 = (*efi.get_time)(tv,tc);
+               tv = (unsigned long *) __va(translate_domain_mpaddr((unsigned long) tv));
+               if (tc) tc = (unsigned long *) __va(translate_domain_mpaddr((unsigned long) tc));
+               regs->r8 = (*efi.get_time)((efi_time_t *) tv, (efi_time_cap_t *) tc);
                //printf("and returns %lx\n",regs->r8);
                break;
            case FW_HYPERCALL_EFI_SET_TIME:
@@ -131,23 +166,23 @@
                break;
            case 0xffff:
                regs->r8 = dump_privop_counts_to_user(
-                       vcpu_get_gr(v,32),
-                       vcpu_get_gr(v,33));
+                       (char *) vcpu_get_gr(v,32),
+                       (int) vcpu_get_gr(v,33));
                break;
            case 0xfffe:
                regs->r8 = zero_privop_counts_to_user(
-                       vcpu_get_gr(v,32),
-                       vcpu_get_gr(v,33));
+                       (char *) vcpu_get_gr(v,32),
+                       (int) vcpu_get_gr(v,33));
                break;
            case __HYPERVISOR_dom0_op:
-               regs->r8 = do_dom0_op(regs->r14);
+               regs->r8 = do_dom0_op((struct dom0_op *) regs->r14);
                break;
 
            case __HYPERVISOR_memory_op:
                /* we don't handle reservations; just return success */
                {
                    struct xen_memory_reservation reservation;
-                   void *arg = regs->r15;
+                   void *arg = (void *) regs->r15;
 
                    switch(regs->r14) {
                    case XENMEM_increase_reservation:
@@ -159,31 +194,35 @@
                            regs->r8 = reservation.nr_extents;
                        break;
                    default:
-                       regs->r8 = do_memory_op(regs->r14, regs->r15);
+                       regs->r8 = do_memory_op((int) regs->r14, (void *)regs->r15);
                        break;
                    }
                }
                break;
 
            case __HYPERVISOR_event_channel_op:
-               regs->r8 = do_event_channel_op(regs->r14);
+               regs->r8 = do_event_channel_op((struct evtchn_op *) regs->r14);
                break;
 
            case __HYPERVISOR_grant_table_op:
-               regs->r8 = do_grant_table_op(regs->r14, regs->r15, regs->r16);
+               regs->r8 = do_grant_table_op((unsigned int) regs->r14, (void *) regs->r15, (unsigned int) regs->r16);
                break;
 
            case __HYPERVISOR_console_io:
-               regs->r8 = do_console_io(regs->r14, regs->r15, regs->r16);
+               regs->r8 = do_console_io((int) regs->r14, (int) regs->r15, (char *) regs->r16);
                break;
 
            case __HYPERVISOR_xen_version:
-               regs->r8 = do_xen_version(regs->r14, regs->r15);
+               regs->r8 = do_xen_version((int) regs->r14, (void *) regs->r15);
+               break;
+
+           case __HYPERVISOR_multicall:
+               regs->r8 = do_multicall((struct multicall_entry *) regs->r14, (unsigned int) regs->r15);
                break;
 
            default:
-               printf("unknown hypercall %x\n", regs->r2);
-               regs->r8 = (unsigned long)-1;
+               printf("unknown hypercall %lx\n", regs->r2);
+               regs->r8 = do_ni_hypercall();
        }
        return 1;
 }
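
For reference, the casts above all follow from the ia64 hypercall calling convention: the hypercall number arrives in r2, arguments in r14 and up, and the result goes back in r8, all as raw unsigned longs, so each dispatch site must cast back to the callee's declared parameter types. A minimal sketch of the pattern (the regs struct is a hypothetical stand-in for pt_regs):

/* Hedged sketch of the dispatch convention used by ia64_hypercall above.
 * Only the registers the dispatcher touches are modeled. */
extern long do_console_io(int cmd, int count, char *buffer);

struct hypercall_regs_sketch {
	unsigned long r2;                /* hypercall number */
	unsigned long r8;                /* return value */
	unsigned long r14, r15, r16;     /* arguments */
};

static int dispatch_sketch(struct hypercall_regs_sketch *regs)
{
	switch (regs->r2) {
	case 18:                         /* __HYPERVISOR_console_io */
		/* arguments arrive as unsigned longs and are cast back to
		 * the callee's real parameter types, exactly as above */
		regs->r8 = do_console_io((int) regs->r14, (int) regs->r15,
		                         (char *) regs->r16);
		break;
	default:
		regs->r8 = (unsigned long) -1;  /* do_ni_hypercall() above */
	}
	return 1;                        /* tell the caller to advance ip */
}
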
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S   Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/hyperprivop.S   Thu Mar  2 10:00:49 2006
@@ -1336,7 +1336,7 @@
        movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
        ld8 r22=[r22];;
        adds r22=IA64_VCPU_META_SAVED_RR0_OFFSET,r22;;
-       ld4 r23=[r22];;
+       ld8 r23=[r22];;
        mov rr[r0]=r23;;
        srlz.i;;
        st4 [r20]=r0 ;;
@@ -1372,7 +1372,7 @@
        movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
        ld8 r22=[r22];;
        adds r22=IA64_VCPU_META_RR0_OFFSET,r22;;
-       ld4 r23=[r22];;
+       ld8 r23=[r22];;
        mov rr[r0]=r23;;
        srlz.i;;
        adds r21=1,r0 ;;
@@ -1733,7 +1733,7 @@
        dep r23=r28,r23,16,8;;
        dep r23=r29,r23,8,8
        cmp.eq p6,p0=r25,r0;;   // if rr0, save for metaphysical
-(p6)   st4 [r24]=r23
+(p6)   st8 [r24]=r23
        mov rr[r8]=r23;;
        // done, mosey on back
 1:     mov r24=cr.ipsr
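
The ld4→ld8 and st4→st8 changes above widen the accesses to the per-vcpu metaphysical rr0 slots: those fields are unsigned long, so the 4-byte forms covered only the low half of the word. In C terms (a sketch; field names are illustrative, addressed in the assembly via the IA64_VCPU_*_OFFSET constants):

/* Sketch: the slots behind IA64_VCPU_META_RR0_OFFSET and
 * IA64_VCPU_META_SAVED_RR0_OFFSET are full 64-bit words. */
struct vcpu_arch_sketch {
	unsigned long metaphysical_rr0;        /* now read with ld8 */
	unsigned long metaphysical_saved_rr0;  /* now written with st8 */
};

static unsigned long load_saved_rr0_sketch(const struct vcpu_arch_sketch *v)
{
	/* compiles to an 8-byte load; a 4-byte load would return only
	 * the low 32 bits of the saved region register value */
	return v->metaphysical_saved_rr0;
}
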
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/irq.c
--- a/xen/arch/ia64/xen/irq.c   Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/irq.c   Thu Mar  2 10:00:49 2006
@@ -129,7 +129,9 @@
 }
 #endif
 
+#ifndef XEN
 static void register_irq_proc (unsigned int irq);
+#endif
 
 /*
  * Special irq handlers.
@@ -286,7 +288,9 @@
 int handle_IRQ_event(unsigned int irq,
                struct pt_regs *regs, struct irqaction *action)
 {
+#ifndef XEN
        int status = 1; /* Force the "do bottom halves" bit */
+#endif
        int retval = 0;
 
 #ifndef XEN
@@ -657,8 +661,10 @@
        if (!action)
                return -ENOMEM;
 
+#ifdef XEN
+       action->handler = (void *) handler;
+#else
        action->handler = handler;
-#ifndef XEN
        action->flags = irqflags;
        action->mask = 0;
 #endif
@@ -698,7 +704,9 @@
 #endif
 {
        irq_desc_t *desc;
+#ifndef XEN
        struct irqaction **p;
+#endif
        unsigned long flags;
 
        if (irq >= NR_IRQS)
@@ -755,7 +763,8 @@
  * disabled.
  */
 
-static DECLARE_MUTEX(probe_sem);
+#ifndef XEN
+static DECLARE_MUTEX(probe_sem);
 
 /**
  *     probe_irq_on    - begin an interrupt autodetect
@@ -765,7 +774,6 @@
  *
  */
 
-#ifndef XEN
 unsigned long probe_irq_on(void)
 {
        unsigned int i;
@@ -936,7 +944,9 @@
 
 int setup_irq(unsigned int irq, struct irqaction * new)
 {
+#ifndef XEN
        int shared = 0;
+#endif
        unsigned long flags;
        struct irqaction *old, **p;
        irq_desc_t *desc = irq_descp(irq);
@@ -1371,7 +1381,7 @@
     return 0;
 }
 
-int pirq_guest_bind(struct vcpu *d, int irq, int will_share)
+int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
 {
     irq_desc_t         *desc = &irq_desc[irq];
     irq_guest_action_t *action;
@@ -1431,7 +1441,7 @@
         goto out;
     }
 
-    action->guest[action->nr_guests++] = d;
+    action->guest[action->nr_guests++] = v->domain;
 
  out:
     spin_unlock_irqrestore(&desc->lock, flags);
@@ -1480,9 +1490,11 @@
 #ifdef XEN
 #ifdef IA64
 // this is a temporary hack until real console input is implemented
+extern void domain_pend_keyboard_interrupt(int irq);
 irqreturn_t guest_forward_keyboard_input(int irq, void *nada, struct pt_regs *regs)
 {
        domain_pend_keyboard_interrupt(irq);
+       return 0;
 }
 
 void serial_input_init(void)
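
Two small fixes close out the irq.c hunk above: guest_forward_keyboard_input gains the return statement its irqreturn_t signature requires (it previously fell off the end, leaving the return value undefined), and the callee is declared before first use. The corrected shape, sketched:

/* Sketch of a well-formed handler; irqreturn_t is assumed to be a plain
 * int in this tree, matching the `return 0;` the patch adds. */
typedef int irqreturn_t;
struct pt_regs;
extern void domain_pend_keyboard_interrupt(int irq);

static irqreturn_t keyboard_forward_sketch(int irq, void *dev_id,
                                           struct pt_regs *regs)
{
	domain_pend_keyboard_interrupt(irq);
	return 0;
}
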
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/ivt.S
--- a/xen/arch/ia64/xen/ivt.S   Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/ivt.S   Thu Mar  2 10:00:49 2006
@@ -298,12 +298,83 @@
        DBG_FAULT(2)
 #ifdef XEN
        VHPT_CCHAIN_LOOKUP(dtlb_miss,d)
+#if VHPT_ENABLED
+       // XXX TODO optimization
+       mov r31=pr                              // save predicates
+       mov r30=cr.ipsr
+       mov r28=cr.iip                  
+       mov r16=cr.ifa                          // get virtual address
+       mov r17=cr.isr                          // get isr
+       ;;
+
+       extr.u r18 = r30, IA64_PSR_CPL0_BIT, 2  // extract psr.cpl
+       ;; 
+       cmp.ne p6, p0 = r0, r18                 // cpl == 0?
+(p6)   br.cond.sptk 2f
+
+       // is speculation bit on?
+       tbit.nz p7,p0=r17,IA64_ISR_SP_BIT       
+       ;; 
+(p7)   br.cond.spnt 2f
+
+       // is non-access bit on?
+       tbit.nz p8,p0=r17,IA64_ISR_NA_BIT       
+       ;;
+(p8)   br.cond.spnt 2f
+
+       // cr.isr.code == IA64_ISR_CODE_LFETCH?
+       and r18=IA64_ISR_CODE_MASK,r17          // get the isr.code field
+       ;; 
+       cmp.eq p9,p0=IA64_ISR_CODE_LFETCH,r18   // check isr.code field
+(p9)   br.cond.spnt 2f
+
+       // Is the faulted iip in vmm area?
+       // check [59:58] bit
+       // 00, 11: guest
+       // 01, 10: vmm
+       extr.u r19 = r28, 58, 2
+       ;; 
+       cmp.eq p10, p0 = 0x0, r19
+(p10)  br.cond.sptk 2f
+       cmp.eq p11, p0 = 0x3, r19
+(p11)  br.cond.sptk 2f
+
+       // Is the faulted address in the identity mapping area?
+       // 0xf000... or 0xe8000...
+       extr.u r20 = r16, 59, 5
+       ;; 
+       cmp.eq p12, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
+(p12)  br.cond.spnt 1f
+       cmp.eq p0, p13 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
+(p13)  br.cond.sptk 2f
+
+1:
+       // xen identity mapping area.
+       movl r24=PAGE_KERNEL
+       movl r25=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+       ;;
+       shr.u r26=r16,55        // move address bit 59 to bit 4
+       and r25=r25,r16         // clear ed, reserved bits, and PTE control bits
+       ;;
+       and r26=0x10,r26        // bit 4=address-bit(59)
+       ;; 
+       or r25=r25,r24          // insert PTE control bits into r25
+       ;;
+       or r25=r25,r26          // set bit 4 (uncached) if the access was to region 6
+       ;;
+       itc.d r25               // insert the TLB entry
+       mov pr=r31,-1
+       rfi
+
+2:
+#endif 
 #ifdef VHPT_GLOBAL
 //     br.cond.sptk page_fault
        br.cond.sptk fast_tlb_miss_reflect
        ;;
 #endif
-#endif
+       mov r29=b0                              // save b0
+#else  
        /*
         * The DTLB handler accesses the L3 PTE via the virtually mapped linear
         * page table.  If a nested TLB miss occurs, we switch into physical
@@ -313,6 +384,7 @@
        mov r16=cr.ifa                          // get virtual address
        mov r29=b0                              // save b0
        mov r31=pr                              // save predicates
+#endif
 dtlb_fault:
        mov r17=cr.iha                          // get virtual address of L3 PTE
        movl r30=1f                             // load nested fault continuation point
@@ -399,6 +471,9 @@
        ;;
        or r19=r19,r18          // set bit 4 (uncached) if the access was to region 6
 (p8)   br.cond.spnt page_fault
+#ifdef XEN
+       FORCE_CRASH
+#endif 
        ;;
        itc.i r19               // insert the TLB entry
        mov pr=r31,-1
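
The new dtlb-miss fast path above assembles the identity-mapping PTE arithmetically in r25; region-7 (0xf...) addresses map cached and region-6 (0xe8...) addresses map uncached, selected by address bit 59. The same computation in C (a sketch; the two constants stand in for the kernel's definitions):

/* C rendering of the PTE the fast path builds in r25.  Both constants
 * are placeholders for the real IA64_MAX_PHYS_BITS and PAGE_KERNEL. */
#define MAX_PHYS_BITS_SKETCH 50          /* stand-in for IA64_MAX_PHYS_BITS */
#define PAGE_KERNEL_SKETCH   0x761UL     /* stand-in for PAGE_KERNEL */

static unsigned long identity_pte_sketch(unsigned long vaddr)
{
	/* keep the physical address bits; clear ed, reserved bits, and
	 * the low 12 PTE control bits, as the assembly comment says */
	unsigned long pte =
		vaddr & (((1UL << MAX_PHYS_BITS_SKETCH) - 1) & ~0xfffUL);

	pte |= PAGE_KERNEL_SKETCH;       /* insert PTE control bits */

	/* move address bit 59 to PTE bit 4: set uncached (bit 4) when
	 * the access was to region 6 */
	pte |= (vaddr >> 55) & 0x10;
	return pte;
}
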
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/mm_init.c
--- a/xen/arch/ia64/xen/mm_init.c       Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/mm_init.c       Thu Mar  2 10:00:49 2006
@@ -47,6 +47,7 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/mca.h>
+#include <asm/vhpt.h>
 
 #ifndef XEN
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -63,7 +64,7 @@
 EXPORT_SYMBOL(vmem_map);
 #endif
 
-static int pgt_cache_water[2] = { 25, 50 };
+// static int pgt_cache_water[2] = { 25, 50 };
 
 struct page *zero_page_memmap_ptr;             /* map entry for zero page */
 EXPORT_SYMBOL(zero_page_memmap_ptr);
@@ -222,7 +223,7 @@
 ia64_set_rbs_bot (void)
 {
 #ifdef XEN
-       unsigned stack_size = MAX_USER_STACK_SIZE;
+       unsigned long stack_size = MAX_USER_STACK_SIZE;
 #else
        unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;
 #endif
@@ -279,7 +280,7 @@
 #endif
 }
 
-setup_gate (void)
+void setup_gate (void)
 {
        printk("setup_gate not-implemented.\n");
 }
@@ -287,7 +288,10 @@
 void __devinit
 ia64_mmu_init (void *my_cpu_data)
 {
-       unsigned long psr, pta, impl_va_bits;
+       unsigned long psr, impl_va_bits;
+#if 0
+       unsigned long pta;
+#endif
        extern void __devinit tlb_init (void);
        int cpu;
 
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/pcdp.c
--- a/xen/arch/ia64/xen/pcdp.c  Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/pcdp.c  Thu Mar  2 10:00:49 2006
@@ -71,7 +71,9 @@
 {
        struct pcdp *pcdp;
        struct pcdp_uart *uart;
+#ifndef XEN
        struct pcdp_device *dev, *end;
+#endif
        int i, serial = 0;
 
        pcdp = efi.hcdp;
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/privop.c
--- a/xen/arch/ia64/xen/privop.c        Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/privop.c        Thu Mar  2 10:00:49 2006
@@ -11,7 +11,12 @@
 #include <asm/processor.h>
 #include <asm/delay.h> // Debug only
 #include <asm/dom_fw.h>
+#include <asm/vhpt.h>
 //#include <debug.h>
+
+/* FIXME: where should these declarations be? */
+extern int dump_reflect_counts(char *);
+extern void zero_reflect_counts(void);
 
 long priv_verbose=0;
 
@@ -524,7 +529,7 @@
 Privileged operation decode and dispatch routines
 **************************************************************************/
 
-IA64_SLOT_TYPE slot_types[0x20][3] = {
+static const IA64_SLOT_TYPE slot_types[0x20][3] = {
        {M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
        {M, I, ILLEGAL}, {M, I, ILLEGAL},
        {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
@@ -544,7 +549,7 @@
 // pointer to privileged emulation function
 typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);
 
-PPEFCN Mpriv_funcs[64] = {
+static const PPEFCN Mpriv_funcs[64] = {
   priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
   priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
   0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
@@ -600,7 +605,7 @@
        if (__copy_from_user(&bundle,iip,sizeof(bundle)))
 #endif
        {
-//printf("*** priv_handle_op: privop bundle @%p not mapped, retrying\n",iip);
+//printf("*** priv_handle_op: privop bundle at 0x%lx not mapped, retrying\n",iip);
                return vcpu_force_data_miss(vcpu,regs->cr_iip);
        }
 #if 0
@@ -613,8 +618,8 @@
 #endif
        if (privop_trace) {
                static long i = 400;
-               //if (i > 0) printf("privop @%p\n",iip);
-               if (i > 0) printf("priv_handle_op: @%p, itc=%lx, itm=%lx\n",
+               //if (i > 0) printf("priv_handle_op: at 0x%lx\n",iip);
+               if (i > 0) printf("priv_handle_op: privop trace at 0x%lx, itc=%lx, itm=%lx\n",
                        iip,ia64_get_itc(),ia64_get_itm());
                i--;
        }
@@ -727,7 +732,7 @@
                break;
        }
         //printf("We who are about do die salute you\n");
-       printf("handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d), ipsr=%p\n",
+       printf("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d), ipsr=0x%lx\n",
                 iip, (UINT64)inst.inst, slot, slot_type, ipsr);
         //printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
         //thread_mozambique("privop fault\n");
@@ -768,7 +773,7 @@
                (void)vcpu_increment_iip(vcpu);
        }
        if (fault == IA64_ILLOP_FAULT)
-               printf("priv_emulate: priv_handle_op fails, isr=%p\n",isr);
+               printf("priv_emulate: priv_handle_op fails, isr=0x%lx\n",isr);
        return fault;
 }
 
@@ -794,11 +799,10 @@
 #define HYPERPRIVOP_SET_KR             0x12
 #define HYPERPRIVOP_MAX                        0x12
 
-char *hyperpriv_str[HYPERPRIVOP_MAX+1] = {
+static const char * const hyperpriv_str[HYPERPRIVOP_MAX+1] = {
        0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
        "=ivr", "=tpr", "tpr=", "eoi", "itm=", "thash", "ptc.ga", "itr.d",
-       "=rr", "rr=", "kr=",
-       0
+       "=rr", "rr=", "kr="
 };
 
 unsigned long slow_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
@@ -809,15 +813,14 @@
 int
 ia64_hyperprivop(unsigned long iim, REGS *regs)
 {
-       struct vcpu *v = (struct domain *) current;
-       INST64 inst;
+       struct vcpu *v = current;
        UINT64 val;
        UINT64 itir, ifa;
 
 // FIXME: Handle faults appropriately for these
        if (!iim || iim > HYPERPRIVOP_MAX) {
                printf("bad hyperprivop; ignored\n");
-               printf("iim=%d, iip=%p\n",iim,regs->cr_iip);
+               printf("iim=%lx, iip=0x%lx\n", iim, regs->cr_iip);
                return 1;
        }
        slow_hyperpriv_cnt[iim]++;
@@ -895,7 +898,7 @@
 Privileged operation instrumentation routines
 **************************************************************************/
 
-char *Mpriv_str[64] = {
+static const char * const Mpriv_str[64] = {
   "mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
   "mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
   "<0x08>", "ptc_l", "ptc_g", "ptc_ga",
@@ -915,7 +918,7 @@
 };
 
 #define RS "Rsvd"
-char *cr_str[128] = {
+static const char * const cr_str[128] = {
   "dcr","itm","iva",RS,RS,RS,RS,RS,
   "pta",RS,RS,RS,RS,RS,RS,RS,
   "ipsr","isr",RS,"iip","ifa","itir","iipa","ifs",
@@ -946,48 +949,48 @@
        for (i=0; i < 64; i++) sum += privcnt.Mpriv_cnt[i];
        s += sprintf(s,"Privop statistics: (Total privops: %ld)\n",sum);
        if (privcnt.mov_to_ar_imm)
-               s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.mov_to_ar_imm,
+               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.mov_to_ar_imm,
                        "mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
        if (privcnt.mov_to_ar_reg)
-               s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.mov_to_ar_reg,
+               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.mov_to_ar_reg,
                        "mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
        if (privcnt.mov_from_ar)
-               s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.mov_from_ar,
+               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.mov_from_ar,
                        "privified-mov_from_ar", 
(privcnt.mov_from_ar*100L)/sum);
        if (privcnt.ssm)
-               s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.ssm,
+               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.ssm,
                        "ssm", (privcnt.ssm*100L)/sum);
        if (privcnt.rsm)
-               s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.rsm,
+               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.rsm,
                        "rsm", (privcnt.rsm*100L)/sum);
        if (privcnt.rfi)
-               s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.rfi,
+               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.rfi,
                        "rfi", (privcnt.rfi*100L)/sum);
        if (privcnt.bsw0)
-               s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.bsw0,
+               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.bsw0,
                        "bsw0", (privcnt.bsw0*100L)/sum);
        if (privcnt.bsw1)
-               s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.bsw1,
+               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.bsw1,
                        "bsw1", (privcnt.bsw1*100L)/sum);
        if (privcnt.cover)
-               s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.cover,
+               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.cover,
                        "cover", (privcnt.cover*100L)/sum);
        if (privcnt.fc)
-               s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.fc,
+               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.fc,
                        "privified-fc", (privcnt.fc*100L)/sum);
        if (privcnt.cpuid)
-               s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.cpuid,
+               s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.cpuid,
                        "privified-getcpuid", (privcnt.cpuid*100L)/sum);
        for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
                if (!Mpriv_str[i]) s += sprintf(s,"PRIVSTRING NULL!!\n");
-               else s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.Mpriv_cnt[i],
+               else s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.Mpriv_cnt[i],
                        Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
                if (i == 0x24) { // mov from CR
                        s += sprintf(s,"            [");
                        for (j=0; j < 128; j++) if (from_cr_cnt[j]) {
                                if (!cr_str[j])
                                        s += sprintf(s,"PRIVSTRING NULL!!\n");
-                               s += sprintf(s,"%s(%d),",cr_str[j],from_cr_cnt[j]);
+                               s += sprintf(s,"%s(%ld),",cr_str[j],from_cr_cnt[j]);
                        }
                        s += sprintf(s,"]\n");
                }
@@ -996,7 +999,7 @@
                        for (j=0; j < 128; j++) if (to_cr_cnt[j]) {
                                if (!cr_str[j])
                                        s += sprintf(s,"PRIVSTRING NULL!!\n");
-                               s += sprintf(s,"%s(%d),",cr_str[j],to_cr_cnt[j]);
+                               s += sprintf(s,"%s(%ld),",cr_str[j],to_cr_cnt[j]);
                        }
                        s += sprintf(s,"]\n");
                }
@@ -1050,7 +1053,7 @@
                s += sprintf(s,"%s:\n",v->instname);
                for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) {
                        if (!v->addr[j]) break;
-                       s += sprintf(s," @%p #%ld\n",v->addr[j],v->count[j]);
+                       s += sprintf(s," at 0x%lx #%ld\n",v->addr[j],v->count[j]);
                }
                if (v->overflow) 
                        s += sprintf(s," other #%ld\n",v->overflow);
@@ -1085,17 +1088,17 @@
 int dump_misc_stats(char *buf)
 {
        char *s = buf;
-       s += sprintf(s,"Virtual TR translations: %d\n",tr_translate_count);
-       s += sprintf(s,"Virtual VHPT slow translations: %d\n",vhpt_translate_count);
-       s += sprintf(s,"Virtual VHPT fast translations: %d\n",fast_vhpt_translate_count);
-       s += sprintf(s,"Virtual DTLB translations: %d\n",dtlb_translate_count);
-       s += sprintf(s,"Physical translations: %d\n",phys_translate_count);
-       s += sprintf(s,"Recoveries to page fault: %d\n",recover_to_page_fault_count);
-       s += sprintf(s,"Recoveries to break fault: %d\n",recover_to_break_fault_count);
-       s += sprintf(s,"Idle when pending: %d\n",idle_when_pending);
-       s += sprintf(s,"PAL_HALT_LIGHT (no pending): %d\n",pal_halt_light_count);
-       s += sprintf(s,"context switches: %d\n",context_switch_count);
-       s += sprintf(s,"Lazy covers: %d\n",lazy_cover_count);
+       s += sprintf(s,"Virtual TR translations: %ld\n",tr_translate_count);
+       s += sprintf(s,"Virtual VHPT slow translations: %ld\n",vhpt_translate_count);
+       s += sprintf(s,"Virtual VHPT fast translations: %ld\n",fast_vhpt_translate_count);
+       s += sprintf(s,"Virtual DTLB translations: %ld\n",dtlb_translate_count);
+       s += sprintf(s,"Physical translations: %ld\n",phys_translate_count);
+       s += sprintf(s,"Recoveries to page fault: %ld\n",recover_to_page_fault_count);
+       s += sprintf(s,"Recoveries to break fault: %ld\n",recover_to_break_fault_count);
+       s += sprintf(s,"Idle when pending: %ld\n",idle_when_pending);
+       s += sprintf(s,"PAL_HALT_LIGHT (no pending): %ld\n",pal_halt_light_count);
+       s += sprintf(s,"context switches: %ld\n",context_switch_count);
+       s += sprintf(s,"Lazy covers: %ld\n",lazy_cover_count);
        return s - buf;
 }
 
@@ -1120,17 +1123,17 @@
        char *s = buf;
        unsigned long total = 0;
        for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += slow_hyperpriv_cnt[i];
-       s += sprintf(s,"Slow hyperprivops (total %d):\n",total);
+       s += sprintf(s,"Slow hyperprivops (total %ld):\n",total);
        for (i = 1; i <= HYPERPRIVOP_MAX; i++)
                if (slow_hyperpriv_cnt[i])
-                       s += sprintf(s,"%10d %s\n",
+                       s += sprintf(s,"%10ld %s\n",
                                slow_hyperpriv_cnt[i], hyperpriv_str[i]);
        total = 0;
        for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += fast_hyperpriv_cnt[i];
-       s += sprintf(s,"Fast hyperprivops (total %d):\n",total);
+       s += sprintf(s,"Fast hyperprivops (total %ld):\n",total);
        for (i = 1; i <= HYPERPRIVOP_MAX; i++)
                if (fast_hyperpriv_cnt[i])
-                       s += sprintf(s,"%10d %s\n",
+                       s += sprintf(s,"%10ld %s\n",
                                fast_hyperpriv_cnt[i], hyperpriv_str[i]);
        return s - buf;
 }
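
Most of the privop.c churn above is printf-format hygiene for an LP64 target: on ia64 an unsigned long is 64 bits wide, so %d consumes only half the argument and %p belongs to genuine pointers. The rule in miniature:

/* Minimal illustration of the %d -> %ld and %p -> 0x%lx fixes above. */
#include <stdio.h>

int main(void)
{
	unsigned long count = 4294967296UL;      /* does not fit in an int */
	unsigned long iip   = 0xe000000000001000UL;

	printf("translations: %ld\n", count);    /* %d here is undefined */
	printf("iip=0x%lx\n", iip);              /* %p expects a void * */
	return 0;
}
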
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c       Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/process.c       Thu Mar  2 10:00:49 2006
@@ -33,8 +33,14 @@
 #include <xen/multicall.h>
 #include <asm/debugger.h>
 
-extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64);
 extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
+/* FIXME: where should these declarations be? */
+extern void load_region_regs(struct vcpu *);
+extern void panic_domain(struct pt_regs *, const char *, ...);
+extern long platform_is_hp_ski(void);
+extern int ia64_hyperprivop(unsigned long, REGS *);
+extern int ia64_hypercall(struct pt_regs *regs);
+extern void vmx_do_launch(struct vcpu *);
 
 extern unsigned long dom0_start, dom0_size;
 
@@ -94,18 +100,21 @@
        extern unsigned long dom0_start, dom0_size;
 
        // FIXME address had better be pre-validated on insert
-       mask = (1L << ((itir >> 2) & 0x3f)) - 1;
+       mask = ~itir_mask(itir);
        mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
        if (d == dom0) {
                if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
-                       //printk("translate_domain_pte: out-of-bounds dom0 mpaddr %p! itc=%lx...\n",mpaddr,ia64_get_itc());
+                       /*
+                       printk("translate_domain_pte: out-of-bounds dom0 mpaddr 0x%lx! itc=%lx...\n",
+                               mpaddr, ia64_get_itc());
+                       */
                        tdpfoo();
                }
        }
        else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
                if ((mpaddr & ~0x1fffL ) != (1L << 40))
-               printf("translate_domain_pte: bad mpa=%p (> %p),vadr=%p,pteval=%p,itir=%p\n",
-                       mpaddr,d->max_pages<<PAGE_SHIFT,address,pteval,itir);
+               printf("translate_domain_pte: bad mpa=0x%lx (> 0x%lx),vadr=0x%lx,pteval=0x%lx,itir=0x%lx\n",
+                       mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT, address, pteval, itir);
                tdpfoo();
        }
        pteval2 = lookup_domain_mpa(d,mpaddr);
@@ -123,7 +132,8 @@
 
        if (current->domain == dom0) {
                if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
-                       printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
+                       printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr 0x%lx! continuing...\n",
+                               mpaddr);
                        tdpfoo();
                }
        }
@@ -150,7 +160,7 @@
 
        s += sprintf(s,"Slow reflections by vector:\n");
        for (i = 0, j = 0; i < 0x80; i++) {
-               if (cnt = slow_reflect_count[i]) {
+               if ( (cnt = slow_reflect_count[i]) != 0 ) {
                        s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
                        if ((j++ & 3) == 3) s += sprintf(s,"\n");
                }
@@ -158,7 +168,7 @@
        if (j & 3) s += sprintf(s,"\n");
        s += sprintf(s,"Fast reflections by vector:\n");
        for (i = 0, j = 0; i < 0x80; i++) {
-               if (cnt = fast_reflect_count[i]) {
+               if ( (cnt = fast_reflect_count[i]) != 0 ) {
                        s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
                        if ((j++ & 3) == 3) s += sprintf(s,"\n");
                }
@@ -186,7 +196,6 @@
 
 void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
 {
-       unsigned long vcpu_get_ipsr_int_state(struct vcpu *,unsigned long);
        struct vcpu *v = current;
 
        if (!PSCB(v,interrupt_collection_enabled))
@@ -205,7 +214,7 @@
 #ifdef CONFIG_SMP
 #warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu"
 #endif
-       regs->r31 = &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
+       regs->r31 = (unsigned long) &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
 
        PSCB(v,interrupt_delivery_enabled) = 0;
        PSCB(v,interrupt_collection_enabled) = 0;
@@ -219,14 +228,12 @@
 
 void reflect_extint(struct pt_regs *regs)
 {
-       extern unsigned long vcpu_verbose, privop_trace;
        unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
        struct vcpu *v = current;
-       static first_extint = 1;
+       static int first_extint = 1;
 
        if (first_extint) {
-               printf("Delivering first extint to domain: isr=%p, iip=%p\n",isr,regs->cr_iip);
-               //privop_trace = 1; vcpu_verbose = 1;
+               printf("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n", isr, regs->cr_iip);
                first_extint = 0;
        }
        if (vcpu_timer_pending_early(v))
@@ -297,11 +304,11 @@
                        // should never happen.  If it does, region 0 addr may
                        // indicate a bad xen pointer
                        printk("*** xen_handle_domain_access: exception table"
-                              " lookup failed, iip=%p, addr=%p, spinning...\n",
-                               iip,address);
+                              " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
+                               iip, address);
                        panic_domain(regs,"*** xen_handle_domain_access: exception table"
-                              " lookup failed, iip=%p, addr=%p, spinning...\n",
-                               iip,address);
+                              " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
+                               iip, address);
                }
                return;
        }
@@ -329,10 +336,9 @@
            unsigned long arg6, unsigned long arg7, unsigned long stack)
 {
        struct pt_regs *regs = (struct pt_regs *) &stack;
-       unsigned long code, error = isr;
+       unsigned long code;
        char buf[128];
-       int result, sig;
-       static const char *reason[] = {
+       static const char * const reason[] = {
                "IA-64 Illegal Operation fault",
                "IA-64 Privileged Operation fault",
                "IA-64 Privileged Register fault",
@@ -543,7 +549,6 @@
 /**/   static int last_fd, last_count; // FIXME FIXME FIXME
 /**/                                   // BROKEN FOR MULTIPLE DOMAINS & SMP
 /**/   struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
-       extern unsigned long vcpu_verbose, privop_trace;
 
        arg0 = vcpu_get_gr(current,32);
        switch(ssc) {
@@ -588,11 +593,11 @@
                arg3 = vcpu_get_gr(current,35);
                if (arg2) {     // metaphysical address of descriptor
                        struct ssc_disk_req *req;
-                       unsigned long mpaddr, paddr;
+                       unsigned long mpaddr;
                        long len;
 
                        arg2 = translate_domain_mpaddr(arg2);
-                       req = (struct disk_req *)__va(arg2);
+                       req = (struct ssc_disk_req *) __va(arg2);
                        req->len &= 0xffffffffL;        // avoid strange bug
                        len = req->len;
 /**/                   last_fd = arg1;
@@ -640,7 +645,8 @@
                vcpu_set_gr(current,8,-1L,0);
                break;
            default:
-               printf("ia64_handle_break: bad ssc code %lx, iip=%p, b0=%p... spinning\n",ssc,regs->cr_iip,regs->b0);
+               printf("ia64_handle_break: bad ssc code %lx, iip=0x%lx, b0=0x%lx... spinning\n",
+                       ssc, regs->cr_iip, regs->b0);
                while(1);
                break;
        }
@@ -696,8 +702,7 @@
 ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
 {
        IA64FAULT vector;
-       struct domain *d = current->domain;
-       struct vcpu *v = current;
+
        vector = priv_emulate(current,regs,isr);
        if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
                // Note: if a path results in a vector to reflect that requires
@@ -712,8 +717,7 @@
 void
 ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
 {
-       struct domain *d = (struct domain *) current->domain;
-       struct vcpu *v = (struct domain *) current;
+       struct vcpu *v = current;
        unsigned long check_lazy_cover = 0;
        unsigned long psr = regs->cr_ipsr;
 
@@ -753,7 +757,8 @@
                }
 #endif
 printf("*** NaT fault... attempting to handle as privop\n");
-printf("isr=%p, ifa=%p,iip=%p,ipsr=%p\n",isr,ifa,regs->cr_iip,psr);
+printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
+       isr, ifa, regs->cr_iip, psr);
                //regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
                // certain NaT faults are higher priority than privop faults
                vector = priv_emulate(v,regs,isr);
@@ -800,8 +805,7 @@
        unsigned int op, const char *format, ...)
 {
     struct mc_state *mcs = &mc_state[smp_processor_id()];
-    VCPU *vcpu = current;
-    struct cpu_user_regs *regs = vcpu_regs(vcpu);
+    struct vcpu *v = current;
     const char *p = format;
     unsigned long arg;
     unsigned int i;
@@ -811,7 +815,7 @@
     if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
        panic("PREEMPT happen in multicall\n"); // Not support yet
     } else {
-       vcpu_set_gr(vcpu, 2, op, 0);
+       vcpu_set_gr(v, 2, op, 0);
        for ( i = 0; *p != '\0'; i++) {
             switch ( *p++ )
             {
@@ -830,22 +834,22 @@
                 BUG();
             }
            switch (i) {
-           case 0: vcpu_set_gr(vcpu, 14, arg, 0);
+           case 0: vcpu_set_gr(v, 14, arg, 0);
                    break;
-           case 1: vcpu_set_gr(vcpu, 15, arg, 0);
+           case 1: vcpu_set_gr(v, 15, arg, 0);
                    break;
-           case 2: vcpu_set_gr(vcpu, 16, arg, 0);
+           case 2: vcpu_set_gr(v, 16, arg, 0);
                    break;
-           case 3: vcpu_set_gr(vcpu, 17, arg, 0);
+           case 3: vcpu_set_gr(v, 17, arg, 0);
                    break;
-           case 4: vcpu_set_gr(vcpu, 18, arg, 0);
+           case 4: vcpu_set_gr(v, 18, arg, 0);
                    break;
            default: panic("Too many args for hypercall continuation\n");
                    break;
            }
        }
     }
-    vcpu->arch.hypercall_continuation = 1;
+    v->arch.hypercall_continuation = 1;
     va_end(args);
     return op;
 }
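
The rewritten hypercall_create_continuation above simply re-stages the hypercall in the guest's registers so the next entry replays it: the op goes to r2 and up to five arguments to r14-r18, matching the dispatch convention at the top of this changeset. The staging loop, sketched (vcpu_set_gr as used above; the vcpu type is opaque here):

/* Sketch of the register staging done by the continuation code. */
struct vcpu;
extern long vcpu_set_gr(struct vcpu *v, unsigned long reg,
                        unsigned long value, int nat);

static void stage_continuation_sketch(struct vcpu *v, unsigned int op,
                                      const unsigned long *args,
                                      unsigned int nargs)
{
	static const unsigned long arg_regs[] = { 14, 15, 16, 17, 18 };
	unsigned int i;

	vcpu_set_gr(v, 2, op, 0);                /* hypercall number in r2 */
	for (i = 0; i < nargs && i < 5; i++)
		vcpu_set_gr(v, arg_regs[i], args[i], 0);
	/* the real code then sets v->arch.hypercall_continuation = 1 */
}
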
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c     Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/regionreg.c     Thu Mar  2 10:00:49 2006
@@ -18,6 +18,8 @@
 extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info, unsigned long p_vhpt, unsigned long v_pal);
 extern void *pal_vaddr;
 
+/* FIXME: where should these declarations be? */
+extern void panic_domain(struct pt_regs *, const char *, ...);
 
 #define        IA64_MIN_IMPL_RID_BITS  (IA64_MIN_IMPL_RID_MSB+1)
 #define        IA64_MAX_IMPL_RID_BITS  24
@@ -142,7 +144,7 @@
        // setup domain struct
        d->arch.rid_bits = ridbits;
        d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS; d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
-printf("###allocating rid_range, domain %p: starting_rid=%lx, ending_rid=%lx\n",
+printf("###allocating rid_range, domain %p: starting_rid=%x, ending_rid=%x\n",
 d,d->arch.starting_rid, d->arch.ending_rid);
        
        return 1;
@@ -211,7 +213,6 @@
        unsigned long rreg = REGION_NUMBER(rr);
        ia64_rr rrv, newrrv, memrrv;
        unsigned long newrid;
-       extern unsigned long vhpt_paddr;
 
        if (val == -1) return 1;
 
@@ -220,8 +221,8 @@
        newrid = v->arch.starting_rid + rrv.rid;
 
        if (newrid > v->arch.ending_rid) {
-               printk("can't set rr%d to %lx, starting_rid=%lx,"
-                       "ending_rid=%lx, val=%lx\n", rreg, newrid,
+               printk("can't set rr%d to %lx, starting_rid=%x,"
+                       "ending_rid=%x, val=%lx\n", (int) rreg, newrid,
                        v->arch.starting_rid,v->arch.ending_rid,val);
                return 0;
        }
@@ -249,10 +250,12 @@
        newrrv.rid = newrid;
        newrrv.ve = 1;  // VHPT now enabled for region 7!!
        newrrv.ps = PAGE_SHIFT;
-       if (rreg == 0) v->arch.metaphysical_saved_rr0 =
-               vmMangleRID(newrrv.rrval);
-       if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
-                               v->arch.privregs, vhpt_paddr, pal_vaddr);
+       if (rreg == 0)
+               v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
+       else if (rreg == 7)
+               ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
+                            v->arch.privregs, __get_cpu_var(vhpt_paddr),
+                            (unsigned long) pal_vaddr);
        else set_rr(rr,newrrv.rrval);
 #endif
        return 1;
@@ -262,11 +265,12 @@
 int set_metaphysical_rr0(void)
 {
        struct vcpu *v = current;
-       ia64_rr rrv;
+//     ia64_rr rrv;
        
 //     rrv.ve = 1;     FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
        ia64_set_rr(0,v->arch.metaphysical_rr0);
        ia64_srlz_d();
+       return 1;
 }
 
 // validates/changes region registers 0-6 in the currently executing domain
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/sn_console.c
--- a/xen/arch/ia64/xen/sn_console.c    Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/sn_console.c    Thu Mar  2 10:00:49 2006
@@ -9,7 +9,13 @@
 #include <asm/sn/sn_sal.h>
 #include <xen/serial.h>
 
-void sn_putc(struct serial_port *, char);
+/*
+ * sn_putc - Send a character to the console, polled or interrupt mode
+ */
+static void sn_putc(struct serial_port *port, char c)
+{
+       ia64_sn_console_putc(c);
+}
 
 static struct uart_driver sn_sal_console = {
        .putc = sn_putc,
@@ -75,11 +81,3 @@
 
        return 0;
 }
-
-/*
- * sn_putc - Send a character to the console, polled or interrupt mode
- */
-void sn_putc(struct serial_port *port, char c)
-{
-       return ia64_sn_console_putc(c);
-}
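
The sn_console.c change collapses a forward declaration plus trailing definition into one static definition placed ahead of the initializer that takes its address, and drops the `return` on a call whose value a void function cannot return. The required ordering, sketched (the SAL call's signature is assumed):

/* Sketch: define the static callback before the designated initializer
 * that references it. */
struct serial_port;                        /* opaque here */
extern int ia64_sn_console_putc(char c);   /* signature assumed */

struct uart_driver_sketch {
	void (*putc)(struct serial_port *port, char c);
};

static void sn_putc_sketch(struct serial_port *port, char c)
{
	/* value discarded; `return expr;` is not allowed in a void function */
	ia64_sn_console_putc(c);
}

static struct uart_driver_sketch sn_sal_console_sketch = {
	.putc = sn_putc_sketch,
};
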
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/vcpu.c  Thu Mar  2 10:00:49 2006
@@ -21,7 +21,15 @@
 #include <asm/processor.h>
 #include <asm/delay.h>
 #include <asm/vmx_vcpu.h>
+#include <asm/vhpt.h>
+#include <asm/tlbflush.h>
 #include <xen/event.h>
+
+/* FIXME: where should these declarations be? */
+extern void getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs);
+extern void setreg(unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs);
+extern void panic_domain(struct pt_regs *, const char *, ...);
+extern int set_metaphysical_rr0(void);
 
 typedef        union {
        struct ia64_psr ia64_psr;
@@ -47,10 +55,10 @@
 #define STATIC
 
 #ifdef PRIVOP_ADDR_COUNT
-struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS] = {
-       { "=ifa", { 0 }, { 0 }, 0 },
+struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS+1] = {
+       { "=ifa",  { 0 }, { 0 }, 0 },
        { "thash", { 0 }, { 0 }, 0 },
-       0
+       { 0,       { 0 }, { 0 }, 0 }
 };
 extern void privop_count_addr(unsigned long addr, int inst);
 #define        PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
@@ -81,7 +89,7 @@
 **************************************************************************/
 #ifdef XEN
 UINT64
-vcpu_get_gr(VCPU *vcpu, unsigned reg)
+vcpu_get_gr(VCPU *vcpu, unsigned long reg)
 {
        REGS *regs = vcpu_regs(vcpu);
        UINT64 val;
@@ -90,7 +98,7 @@
        return val;
 }
 IA64FAULT
-vcpu_get_gr_nat(VCPU *vcpu, unsigned reg, UINT64 *val)
+vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val)
 {
        REGS *regs = vcpu_regs(vcpu);
     int nat;
@@ -104,7 +112,7 @@
 //   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
 //   IA64_NO_FAULT otherwise
 IA64FAULT
-vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value, int nat)
+vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int nat)
 {
        REGS *regs = vcpu_regs(vcpu);
        if (!reg) return IA64_ILLOP_FAULT;
@@ -118,7 +126,7 @@
 //   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
 //   IA64_NO_FAULT otherwise
 IA64FAULT
-vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
+vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value)
 {
        REGS *regs = vcpu_regs(vcpu);
        long sof = (regs->cr_ifs) & 0x7f;
@@ -375,7 +383,7 @@
 UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
 {
        UINT64 dcr = PSCBX(vcpu,dcr);
-       PSR psr = {0};
+       PSR psr;
 
        //printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
        psr.i64 = prevpsr;
@@ -397,7 +405,7 @@
 
 IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
 {
-extern unsigned long privop_trace;
+//extern unsigned long privop_trace;
 //privop_trace=0;
 //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
        // Reads of cr.dcr on Xen always have the sign bit set, so
@@ -525,7 +533,7 @@
 
 IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
 {
-extern unsigned long privop_trace;
+//extern unsigned long privop_trace;
 //privop_trace=1;
        // Reads of cr.dcr on SP always have the sign bit set, so
        // a domain can differentiate whether it is running on SP or not
@@ -643,7 +651,6 @@
            set_bit(vector,VCPU(vcpu,irr));
     } else
     {
-       /* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
        if (test_bit(vector,PSCBX(vcpu,irr))) {
 //printf("vcpu_pend_interrupt: overrun\n");
        }
@@ -683,7 +690,6 @@
                vcpu_pend_interrupt(vcpu, vcpu->vcpu_info->arch.evtchn_vector);
 
        p = &PSCBX(vcpu,irr[3]);
-       /* q = &PSCB(vcpu,delivery_mask[3]); */
        r = &PSCBX(vcpu,insvc[3]);
        for (i = 3; ; p--, q--, r--, i--) {
                bits = *p /* & *q */;
@@ -747,7 +753,7 @@
 
 IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
 {
-extern unsigned long privop_trace;
+//extern unsigned long privop_trace;
 //privop_trace=1;
        //TODO: Implement this
        printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
@@ -764,9 +770,10 @@
 #define HEARTBEAT_FREQ 16      // period in seconds
 #ifdef HEARTBEAT_FREQ
 #define N_DOMS 16      // period in seconds
+#if 0
        static long count[N_DOMS] = { 0 };
+#endif
        static long nonclockcount[N_DOMS] = { 0 };
-       REGS *regs = vcpu_regs(vcpu);
        unsigned domid = vcpu->domain->domain_id;
 #endif
 #ifdef IRQ_DEBUG
@@ -803,7 +810,7 @@
        // getting ivr has "side effects"
 #ifdef IRQ_DEBUG
        if (firsttime[vector]) {
-               printf("*** First get_ivr on vector=%d,itc=%lx\n",
+               printf("*** First get_ivr on vector=%lu,itc=%lx\n",
                        vector,ia64_get_itc());
                firsttime[vector]=0;
        }
@@ -817,7 +824,7 @@
 
        i = vector >> 6;
        mask = 1L << (vector & 0x3f);
-//printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
+//printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %lu\n",vector);
        PSCBX(vcpu,insvc[i]) |= mask;
        PSCBX(vcpu,irr[i]) &= ~mask;
        //PSCB(vcpu,pending_interruption)--;
@@ -978,27 +985,18 @@
        return (IA64_NO_FAULT);
 }
 
-// parameter is a time interval specified in cycles
-void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
-{
-    PSCBX(vcpu,xen_timer_interval) = cycles;
-    vcpu_set_next_timer(vcpu);
-    printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
-             PSCBX(vcpu,xen_timer_interval));
-    __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
-}
-
 IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
 {
-extern unsigned long privop_trace;
+//extern unsigned long privop_trace;
 //privop_trace=1;
        if (val & 0xef00) return (IA64_ILLOP_FAULT);
        PSCB(vcpu,itv) = val;
        if (val & 0x10000) {
-printf("**** vcpu_set_itv(%d): vitm=%lx, setting to 0\n",val,PSCBX(vcpu,domain_itm));
+               printf("**** vcpu_set_itv(%lu): vitm=%lx, setting to 0\n",
+                      val,PSCBX(vcpu,domain_itm));
                PSCBX(vcpu,domain_itm) = 0;
        }
-       else vcpu_enable_timer(vcpu,1000000L);
+       else vcpu_set_next_timer(vcpu);
        return (IA64_NO_FAULT);
 }
 
@@ -1080,7 +1078,6 @@
        //UINT64 s = PSCBX(vcpu,xen_itm);
        UINT64 s = local_cpu_data->itm_next;
        UINT64 now = ia64_get_itc();
-       //UINT64 interval = PSCBX(vcpu,xen_timer_interval);
 
        /* gloss over the wraparound problem for now... we know it exists
         * but it doesn't matter right now */
@@ -1103,7 +1100,7 @@
 
 IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
 {
-       UINT now = ia64_get_itc();
+       //UINT now = ia64_get_itc();
 
        //if (val < now) val = now + 1000;
 //printf("*** vcpu_set_itm: called with %lx\n",val);
@@ -1114,7 +1111,10 @@
 
 IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
 {
-
+#define DISALLOW_SETTING_ITC_FOR_NOW
+#ifdef DISALLOW_SETTING_ITC_FOR_NOW
+printf("vcpu_set_itc: Setting ar.itc is currently disabled\n");
+#else
        UINT64 oldnow = ia64_get_itc();
        UINT64 olditm = PSCBX(vcpu,domain_itm);
        unsigned long d = olditm - oldnow;
@@ -1122,10 +1122,6 @@
 
        UINT64 newnow = val, min_delta;
 
-#define DISALLOW_SETTING_ITC_FOR_NOW
-#ifdef DISALLOW_SETTING_ITC_FOR_NOW
-printf("vcpu_set_itc: Setting ar.itc is currently disabled\n");
-#else
        local_irq_disable();
        if (olditm) {
 printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
@@ -1293,9 +1289,6 @@
        return (IA64_ILLOP_FAULT);
 }
 
-#define itir_ps(itir)  ((itir >> 2) & 0x3f)
-#define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))
-
 unsigned long vhpt_translate_count = 0;
 unsigned long fast_vhpt_translate_count = 0;
 unsigned long recover_to_page_fault_count = 0;
@@ -1317,7 +1310,7 @@
 // this down, but since it has been apparently harmless, just flag it for now
 //                     panic_domain(vcpu_regs(vcpu),
                        printk(
-                        "vcpu_translate: bad physical address: %p\n",address);
+                        "vcpu_translate: bad physical address: 0x%lx\n",address);
                }
                *pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
                *itir = PAGE_SHIFT << 2;
@@ -1330,7 +1323,8 @@
                unsigned long vipsr = PSCB(vcpu,ipsr);
                unsigned long iip = regs->cr_iip;
                unsigned long ipsr = regs->cr_ipsr;
-               printk("vcpu_translate: bad address %p, viip=%p, vipsr=%p, iip=%p, ipsr=%p continuing\n", address, viip, vipsr, iip, ipsr);
+               printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
+                       address, viip, vipsr, iip, ipsr);
        }
 
        rr = PSCB(vcpu,rrs)[region];
@@ -1798,7 +1792,7 @@
 
 IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
 {
-       unsigned long pteval, logps = (itir >> 2) & 0x3f;
+       unsigned long pteval, logps = itir_ps(itir);
        unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
        BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
 
@@ -1818,7 +1812,7 @@
 
 IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
 {
-       unsigned long pteval, logps = (itir >> 2) & 0x3f;
+       unsigned long pteval, logps = itir_ps(itir);
        unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
        BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
 
@@ -1891,7 +1885,7 @@
 
 IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
 {
-       extern ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
+       extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
        // FIXME: validate not flushing Xen addresses
        // if (Xen address) return(IA64_ILLOP_FAULT);
        // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
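
vcpu.c above replaces the open-coded `(itir >> 2) & 0x3f` page-size extractions with the itir_ps()/itir_mask() helpers and deletes the local macro copies (visible in the removed hunk) now that a header provides them. For reference, the definitions and a worked value:

/* The helpers the patch consolidates (as in the removed lines above). */
#define itir_ps(itir)   (((itir) >> 2) & 0x3f)           /* log2 page size */
#define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))  /* page-frame mask */

/* Example: for a 16KB page, itir carries ps = 14 in bits 2..7, so
 *   itir_ps(14UL << 2)   == 14
 *   itir_mask(14UL << 2) == ~0x3fffUL
 */
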
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/vhpt.c  Thu Mar  2 10:00:49 2006
@@ -15,12 +15,13 @@
 #include <asm/dma.h>
 #include <asm/vhpt.h>
 
-unsigned long vhpt_paddr, vhpt_pend, vhpt_pte;
+DEFINE_PER_CPU (unsigned long, vhpt_paddr);
+DEFINE_PER_CPU (unsigned long, vhpt_pend);
 
 void vhpt_flush(void)
 {
        struct vhpt_lf_entry *v = (void *)VHPT_ADDR;
-       int i, cnt = 0;
+       int i;
 #if 0
 static int firsttime = 2;
 
@@ -47,7 +48,6 @@
 #ifdef VHPT_GLOBAL
 void vhpt_flush_address(unsigned long vadr, unsigned long addr_range)
 {
-       unsigned long ps;
        struct vhpt_lf_entry *vlfe;
 
        if ((vadr >> 61) == 7) {
@@ -77,12 +77,12 @@
 }
 #endif
 
-void vhpt_map(void)
+static void vhpt_map(unsigned long pte)
 {
        unsigned long psr;
 
        psr = ia64_clear_ic();
-       ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, vhpt_pte, VHPT_SIZE_LOG2);
+       ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, pte, VHPT_SIZE_LOG2);
        ia64_set_psr(psr);
        ia64_srlz_i();
 }
@@ -121,29 +121,35 @@
 
 void vhpt_init(void)
 {
-       unsigned long vhpt_total_size, vhpt_alignment, vhpt_imva;
+       unsigned long vhpt_total_size, vhpt_alignment;
+       unsigned long paddr, pte;
+       struct page_info *page;
 #if !VHPT_ENABLED
        return;
 #endif
        // allocate a huge chunk of physical memory.... how???
        vhpt_total_size = 1 << VHPT_SIZE_LOG2;  // 4MB, 16MB, 64MB, or 256MB
        vhpt_alignment = 1 << VHPT_SIZE_LOG2;   // 4MB, 16MB, 64MB, or 256MB
-       printf("vhpt_init: vhpt size=%p, align=%p\n",vhpt_total_size,vhpt_alignment);
+       printf("vhpt_init: vhpt size=0x%lx, align=0x%lx\n",
+               vhpt_total_size, vhpt_alignment);
        /* This allocation only holds true if vhpt table is unique for
         * all domains. Or else later new vhpt table should be allocated
         * from domain heap when each domain is created. Assume xen buddy
         * allocator can provide natural aligned page by order?
         */
-       vhpt_imva = alloc_xenheap_pages(VHPT_SIZE_LOG2 - PAGE_SHIFT);
-       if (!vhpt_imva) {
+//     vhpt_imva = alloc_xenheap_pages(VHPT_SIZE_LOG2 - PAGE_SHIFT);
+       page = alloc_domheap_pages(NULL, VHPT_SIZE_LOG2 - PAGE_SHIFT, 0);
+       if (!page) {
                printf("vhpt_init: can't allocate VHPT!\n");
                while(1);
        }
-       vhpt_paddr = __pa(vhpt_imva);
-       vhpt_pend = vhpt_paddr + vhpt_total_size - 1;
-       printf("vhpt_init: vhpt paddr=%p, end=%p\n",vhpt_paddr,vhpt_pend);
-       vhpt_pte = pte_val(pfn_pte(vhpt_paddr >> PAGE_SHIFT, PAGE_KERNEL));
-       vhpt_map();
+       paddr = page_to_maddr(page);
+       __get_cpu_var(vhpt_paddr) = paddr;
+       __get_cpu_var(vhpt_pend) = paddr + vhpt_total_size - 1;
+       printf("vhpt_init: vhpt paddr=0x%lx, end=0x%lx\n",
+               paddr, __get_cpu_var(vhpt_pend));
+       pte = pte_val(pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
+       vhpt_map(pte);
        ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                VHPT_ENABLED);
        vhpt_flush();
@@ -167,6 +173,6 @@
                if (v->CChain) vhpt_chains++;
        }
        s += sprintf(s,"VHPT usage: %ld/%ld (%ld collision chains)\n",
-               vhpt_valid,VHPT_NUM_ENTRIES,vhpt_chains);
+               vhpt_valid, (unsigned long) VHPT_NUM_ENTRIES, vhpt_chains);
        return s - buf;
 }
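
vhpt.c above turns the VHPT base and end addresses into per-CPU variables (regionreg.c now reads the base through __get_cpu_var as well), so each processor can eventually host its own hash table. The idiom, sketched (DEFINE_PER_CPU and __get_cpu_var are the tree's per-CPU primitives; the providing header is assumed included):

/* Sketch of the per-CPU conversion pattern used above. */
DEFINE_PER_CPU(unsigned long, vhpt_paddr);
DEFINE_PER_CPU(unsigned long, vhpt_pend);

static void record_vhpt_sketch(unsigned long paddr, unsigned long size)
{
	__get_cpu_var(vhpt_paddr) = paddr;             /* this CPU's slot */
	__get_cpu_var(vhpt_pend)  = paddr + size - 1;
}
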
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/xenirq.c
--- a/xen/arch/ia64/xen/xenirq.c        Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/xenirq.c        Thu Mar  2 10:00:49 2006
@@ -24,7 +24,7 @@
                firstirq = 0;
        }
        if (firsttime[vector]) {
-               printf("**** (entry) First received int on vector=%d,itc=%lx\n",
+               printf("**** (entry) First received int on vector=%lu,itc=%lx\n",
                        (unsigned long) vector, ia64_get_itc());
                firsttime[vector] = 0;
        }
@@ -38,13 +38,13 @@
                extern void vcpu_pend_interrupt(void *, int);
 #if 0
                if (firsttime[vector]) {
-                       printf("**** (iterate) First received int on vector=%d,itc=%lx\n",
-                       (unsigned long) vector, ia64_get_itc());
+                       printf("**** (iterate) First received int on vector=%lu,itc=%lx\n",
+                               (unsigned long) vector, ia64_get_itc());
                        firsttime[vector] = 0;
                }
                if (firstpend[vector]) {
-                       printf("**** First pended int on vector=%d,itc=%lx\n",
-                               (unsigned long) vector,ia64_get_itc());
+                       printf("**** First pended int on vector=%lu,itc=%lx\n",
+                               (unsigned long) vector, ia64_get_itc());
                        firstpend[vector] = 0;
                }
 #endif
@@ -59,7 +59,7 @@
 /*
  * Exit an interrupt context. Process softirqs if needed and possible:
  */
-void xen_irq_exit(struct pt_regs *regs)
+void irq_exit(void)
 {
        sub_preempt_count(IRQ_EXIT_OFFSET);
 }
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/xenmem.c
--- a/xen/arch/ia64/xen/xenmem.c        Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/xenmem.c        Thu Mar  2 10:00:49 2006
@@ -34,7 +34,6 @@
 void
 paging_init (void)
 {
-       struct page_info *pg;
        unsigned int mpt_order;
        /* Create machine to physical mapping table
         * NOTE: similar to frame table, later we may need virtually
@@ -61,7 +60,7 @@
 #define FT_ALIGN_SIZE  (16UL << 20)
 void __init init_frametable(void)
 {
-       unsigned long i, pfn;
+       unsigned long pfn;
        frame_table_size = max_page * sizeof(struct page_info);
        frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
 
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c       Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/xenmisc.c       Thu Mar  2 10:00:49 2006
@@ -19,12 +19,18 @@
 #include <public/sched.h>
 #include <asm/vhpt.h>
 #include <asm/debugger.h>
+#include <asm/vmx.h>
+#include <asm/vmx_vcpu.h>
 
 efi_memory_desc_t ia64_efi_io_md;
 EXPORT_SYMBOL(ia64_efi_io_md);
 unsigned long wait_init_idle;
 int phys_proc_id[NR_CPUS];
 unsigned long loops_per_jiffy = (1<<12);       // from linux/init/main.c
+
+/* FIXME: where should these declarations be? */
+extern void load_region_regs(struct vcpu *);
+extern void show_registers(struct pt_regs *regs);
 
 void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
 void ia64_mca_cpu_init(void *x) { }
@@ -168,7 +174,11 @@
 
 void *pgtable_quicklist_alloc(void)
 {
-       return alloc_xenheap_pages(0);
+    void *p;
+    p = alloc_xenheap_pages(0);
+    if (p) 
+        clear_page(p);
+    return p;
 }
 
 void pgtable_quicklist_free(void *pgtable_entry)
@@ -247,6 +257,7 @@
           unsigned long user_rbs_end, unsigned long addr, long *val)
 {
        printk("ia64_peek: called, not implemented\n");
+       return 1;
 }
 
 long
@@ -254,6 +265,7 @@
           unsigned long user_rbs_end, unsigned long addr, long val)
 {
        printk("ia64_poke: called, not implemented\n");
+       return 1;
 }
 
 void
@@ -291,6 +303,7 @@
 void context_switch(struct vcpu *prev, struct vcpu *next)
 {
     uint64_t spsr;
+    uint64_t pta;
 
     local_irq_save(spsr);
     if(VMX_DOMAIN(prev)){
@@ -298,9 +311,9 @@
     }
        context_switch_count++;
        switch_to(prev,next,prev);
-    if(VMX_DOMAIN(current)){
-        vtm_domain_in(current);
-    }
+//    if(VMX_DOMAIN(current)){
+//        vtm_domain_in(current);
+//    }
 
 // leave this debug for now: it acts as a heartbeat when more than
 // one domain is active
@@ -309,22 +322,30 @@
 static int i = 100;
 int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
 if (!cnt[id]--) { printk("%x",id); cnt[id] = 500000; }
-if (!i--) { printk("+",id); i = 1000000; }
+if (!i--) { printk("+"); i = 1000000; }
 }
 
     if (VMX_DOMAIN(current)){
+        vtm_domain_in(current);
                vmx_load_all_rr(current);
     }else{
-       extern char ia64_ivt;
-       ia64_set_iva(&ia64_ivt);
-       ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
-               VHPT_ENABLED);
+       extern char ia64_ivt;
+       ia64_set_iva(&ia64_ivt);
        if (!is_idle_domain(current->domain)) {
+               ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
+                       VHPT_ENABLED);
                load_region_regs(current);
                vcpu_load_kernel_regs(current);
-                   if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
-       }
-           if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
+                   if (vcpu_timer_expired(current))
+                vcpu_pend_timer(current);
+       }else {
+        /* When switching to the idle domain, we only need to disable the
+        * vhpt walker. All accesses that happen within the idle context
+        * will then be handled by TR mapping and identity mapping.
+        */
+           pta = ia64_get_pta();
+           ia64_set_pta(pta & ~VHPT_ENABLED);
+        }
     }
 
     local_irq_restore(spsr);
@@ -345,12 +366,12 @@
        va_list args;
        char buf[128];
        struct vcpu *v = current;
-       static volatile int test = 1;   // so can continue easily in debug
-       extern spinlock_t console_lock;
-       unsigned long flags;
+//     static volatile int test = 1;   // so can continue easily in debug
+//     extern spinlock_t console_lock;
+//     unsigned long flags;
     
 loop:
-       printf("$$$$$ PANIC in domain %d (k6=%p): ",
+       printf("$$$$$ PANIC in domain %d (k6=0x%lx): ",
                v->domain->domain_id, 
                __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]);
        va_start(args, fmt);
@@ -365,7 +386,7 @@
        }
        domain_pause_by_systemcontroller(current->domain);
        v->domain->shutdown_code = SHUTDOWN_crash;
-       set_bit(_DOMF_shutdown, v->domain->domain_flags);
+       set_bit(_DOMF_shutdown, &v->domain->domain_flags);
        if (v->domain->domain_id == 0) {
                int i = 1000000000L;
                // if domain0 crashes, just periodically print out panic
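
One genuine fix sits among the xenmisc.c cleanups above: set_bit() operates on the address of the flags word, so the old `set_bit(_DOMF_shutdown, v->domain->domain_flags)` passed the word's value where a pointer belongs. Sketched:

/* Sketch of the corrected bitop call; set_bit takes a pointer to the
 * word holding the flag bits. */
extern void set_bit(int nr, volatile void *addr);

struct domain_sketch {
	unsigned long domain_flags;
};

static void mark_shutdown_sketch(struct domain_sketch *d, int shutdown_bit)
{
	set_bit(shutdown_bit, &d->domain_flags);   /* was: d->domain_flags */
}
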
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c      Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/xensetup.c      Thu Mar  2 10:00:49 2006
@@ -14,7 +14,7 @@
 #include <public/version.h>
 //#include <xen/delay.h>
 #include <xen/compile.h>
-//#include <xen/console.h>
+#include <xen/console.h>
 #include <xen/serial.h>
 #include <xen/trace.h>
 #include <asm/meminit.h>
@@ -22,6 +22,7 @@
 #include <asm/setup.h>
 #include <xen/string.h>
 #include <asm/vmx.h>
+#include <linux/efi.h>
 
 unsigned long xenheap_phys_end;
 
@@ -31,13 +32,20 @@
 
 cpumask_t cpu_present_map;
 
-#ifdef CLONE_DOMAIN0
-struct domain *clones[CLONE_DOMAIN0];
-#endif
 extern unsigned long domain0_ready;
 
 int find_max_pfn (unsigned long, unsigned long, void *);
 void start_of_day(void);
+
+/* FIXME: which header should these declarations live in? */
+extern long is_platform_hp_ski(void);
+extern void early_setup_arch(char **);
+extern void late_setup_arch(char **);
+extern void hpsim_serial_init(void);
+extern void alloc_dom0(void);
+extern void setup_per_cpu_areas(void);
+extern void mem_init(void);
+extern void init_IRQ(void);
 
 /* opt_nosmp: If true, secondary processors are ignored. */
 static int opt_nosmp = 0;
@@ -147,13 +155,30 @@
     .parity    = 'n',
     .stop_bits = 1
 };
+/*  This is a wrapper around init_domheap_pages;
+ *  memory above (max_page<<PAGE_SHIFT) will not be reclaimed.
+ *  This function will go away once the virtual memmap/discontig
+ *  memory issues are solved.
+ */
+void init_domheap_pages_wrapper(unsigned long ps, unsigned long pe)
+{
+    unsigned long s_nrm, e_nrm, max_mem;
+    max_mem = (max_page+1)<<PAGE_SHIFT;
+    s_nrm = (ps+PAGE_SIZE-1)&PAGE_MASK;
+    e_nrm = pe&PAGE_MASK;
+    s_nrm = min(s_nrm, max_mem);
+    e_nrm = min(e_nrm, max_mem);
+    if(s_nrm < e_nrm)
+         init_domheap_pages(s_nrm, e_nrm);
+}
+
+
 
 void start_kernel(void)
 {
     unsigned char *cmdline;
     void *heap_start;
-    int i;
-    unsigned long max_mem, nr_pages, firsthole_start;
+    unsigned long nr_pages, firsthole_start;
     unsigned long dom0_memory_start, dom0_memory_size;
     unsigned long dom0_initrd_start, dom0_initrd_size;
     unsigned long initial_images_start, initial_images_end;
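
For reference, the clamping in init_domheap_pages_wrapper above rounds the
start up and the end down to page boundaries, then caps both at max_page.
A worked example with a hypothetical PAGE_SHIFT of 14 (16KB pages), assumed
purely for illustration:

    /* ps = 0x4001, pe = 0xC000, PAGE_SIZE = 0x4000:
     *   s_nrm = (0x4001 + 0x3fff) & PAGE_MASK = 0x8000   (round up)
     *   e_nrm =  0xC000           & PAGE_MASK = 0xC000   (round down)
     * Only the whole pages in [0x8000, 0xC000) are reclaimed; a
     * partial page at either end is silently dropped. */
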
@@ -163,7 +188,7 @@
     /* Kernel may be relocated by EFI loader */
     xen_pstart = ia64_tpa(KERNEL_START);
 
-    early_setup_arch(&cmdline);
+    early_setup_arch((char **) &cmdline);
 
     /* We initialise the serial devices very early so we can get debugging. */
     if (running_on_sim) hpsim_serial_init();
@@ -251,9 +276,9 @@
        max_page);
 
     heap_start = memguard_init(ia64_imva(&_end));
-    printf("Before heap_start: 0x%lx\n", heap_start);
+    printf("Before heap_start: %p\n", heap_start);
     heap_start = __va(init_boot_allocator(__pa(heap_start)));
-    printf("After heap_start: 0x%lx\n", heap_start);
+    printf("After heap_start: %p\n", heap_start);
 
     reserve_memory();
 
@@ -284,7 +309,7 @@
     idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
     BUG_ON(idle_domain == NULL);
 
-    late_setup_arch(&cmdline);
+    late_setup_arch((char **) &cmdline);
     setup_per_cpu_areas();
     mem_init();
 
@@ -301,6 +326,8 @@
 #endif
 
 #ifdef CONFIG_SMP
+    int i;
+
     if ( opt_nosmp )
     {
         max_cpus = 0;
@@ -342,16 +369,6 @@
 printk("About to call domain_create()\n");
     dom0 = domain_create(0, 0);
 
-#ifdef CLONE_DOMAIN0
-    {
-    int i;
-    for (i = 0; i < CLONE_DOMAIN0; i++) {
-       clones[i] = domain_create(i+1, 0);
-        if ( clones[i] == NULL )
-            panic("Error creating domain0 clone %d\n",i);
-    }
-    }
-#endif
     if ( dom0 == NULL )
         panic("Error creating domain 0\n");
 
@@ -362,9 +379,9 @@
      * above our heap. The second module, if present, is an initrd ramdisk.
      */
     printk("About to call construct_dom0()\n");
-    dom0_memory_start = __va(initial_images_start);
+    dom0_memory_start = (unsigned long) __va(initial_images_start);
     dom0_memory_size = ia64_boot_param->domain_size;
-    dom0_initrd_start = __va(initial_images_start +
+    dom0_initrd_start = (unsigned long) __va(initial_images_start +
                             PAGE_ALIGN(ia64_boot_param->domain_size));
     dom0_initrd_size = ia64_boot_param->initrd_size;
  
@@ -376,29 +393,15 @@
     /* PIN domain0 on CPU 0.  */
     dom0->vcpu[0]->cpu_affinity = cpumask_of_cpu(0);
 
-#ifdef CLONE_DOMAIN0
-    {
-    int i;
-    dom0_memory_start = __va(ia64_boot_param->domain_start);
-    dom0_memory_size = ia64_boot_param->domain_size;
-
-    for (i = 0; i < CLONE_DOMAIN0; i++) {
-      printk("CONSTRUCTING DOMAIN0 CLONE #%d\n",i+1);
-      if ( construct_domU(clones[i], dom0_memory_start, dom0_memory_size,
-                         dom0_initrd_start,dom0_initrd_size,
-                         0) != 0)
-            panic("Could not set up DOM0 clone %d\n",i);
-    }
-    }
-#endif
-
     /* The stash space for the initial kernel image can now be freed up. */
-    init_domheap_pages(ia64_boot_param->domain_start,
-                       ia64_boot_param->domain_size);
+    /* init_domheap_pages_wrapper is a temporary solution; please refer
+     * to the description above that function. */
+    init_domheap_pages_wrapper(ia64_boot_param->domain_start,
+           ia64_boot_param->domain_start+ia64_boot_param->domain_size);
     /* throw away initrd area passed from elilo */
     if (ia64_boot_param->initrd_size) {
-        init_domheap_pages(ia64_boot_param->initrd_start,
-                          ia64_boot_param->initrd_size);
+        init_domheap_pages_wrapper(ia64_boot_param->initrd_start,
+           ia64_boot_param->initrd_start+ia64_boot_param->initrd_size);
     }
 
     if (!running_on_sim)  // slow on ski and pages are pre-initialized to zero
@@ -412,13 +415,6 @@
     console_endboot(cmdline && strstr(cmdline, "tty0"));
 #endif
 
-#ifdef CLONE_DOMAIN0
-    {
-    int i;
-    for (i = 0; i < CLONE_DOMAIN0; i++)
-       domain_unpause_by_systemcontroller(clones[i]);
-    }
-#endif
     domain0_ready = 1;
 
     local_irq_enable();
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/arch/ia64/xen/xentime.c
--- a/xen/arch/ia64/xen/xentime.c       Thu Mar  2 09:59:34 2006
+++ b/xen/arch/ia64/xen/xentime.c       Thu Mar  2 10:00:49 2006
@@ -30,6 +30,9 @@
 #include <linux/jiffies.h>     // not included by xen/sched.h
 #include <xen/softirq.h>
 
+/* FIXME: where should these declarations live? */
+extern void ia64_init_itm(void);
+
 seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
 
 #define TIME_KEEPER_ID  0
@@ -70,7 +73,7 @@
 s_time_t get_s_time(void)
 {
     s_time_t now;
-    unsigned long flags, seq;
+    unsigned long seq;
 
     do {
        seq = read_seqbegin(&xtime_lock);
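
get_s_time() relies on the standard seqlock reader idiom: retry the lock-free
read section until no writer interleaved.  A sketch of the pattern (the real
body also folds in the scaled ITC delta since the last timer tick; stime_irq
is the base time this file maintains):

    s_time_t now;
    unsigned long seq;

    do {
        seq = read_seqbegin(&xtime_lock);   /* snapshot writer sequence */
        now = stime_irq;                    /* side-effect-free read section */
    } while (unlikely(read_seqretry(&xtime_lock, seq)));  /* writer ran: retry */
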
@@ -202,7 +205,7 @@
 }
 
 static struct irqaction xen_timer_irqaction = {
-       .handler =      xen_timer_interrupt,
+       .handler =      (void *) xen_timer_interrupt,
        .name =         "timer"
 };
 
@@ -217,8 +220,6 @@
 /* Late init function (after all CPUs are booted). */
 int __init init_xen_time()
 {
-    struct timespec tm;
-
     ia64_time_init();
     itc_scale  = 1000000000UL << 32 ;
     itc_scale /= local_cpu_data->itc_freq;
@@ -253,7 +254,7 @@
        } while (unlikely(read_seqretry(&xtime_lock, seq)));
 
        local_cpu_data->itm_next = itm_next;
-       vcpu_set_next_timer(current);
+       vcpu_set_next_timer(v);
        return 1;
 }
 
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h     Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/config.h     Thu Mar  2 10:00:49 2006
@@ -3,11 +3,8 @@
 
 #undef USE_PAL_EMULATOR
 // control flags for turning on/off features under test
-#undef CLONE_DOMAIN0
-//#define CLONE_DOMAIN0 1
 #undef DOMU_BUILD_STAGING
 #define VHPT_GLOBAL
-#define DOMU_AUTO_RESTART
 
 #undef DEBUG_PFMON
 
@@ -24,7 +21,7 @@
 #define CONFIG_EFI_PCDP
 #define CONFIG_SERIAL_SGI_L1_CONSOLE
 
-#undef CONFIG_XEN_SMP
+#define CONFIG_XEN_SMP
 
 #ifdef CONFIG_XEN_SMP
 #define CONFIG_SMP 1
@@ -72,7 +69,7 @@
 extern unsigned long xenheap_phys_end;
 extern unsigned long xen_pstart;
 extern unsigned long xenheap_size;
-extern struct domain *dom0;
+//extern struct domain *dom0;
 extern unsigned long dom0_start;
 extern unsigned long dom0_size;
 
@@ -211,9 +208,9 @@
 
 // see include/asm-ia64/mm.h, handle remaining page_info uses until gone
 #define page_info page
-
-// see common/memory.c
-#define set_gpfn_from_mfn(x,y) do { } while (0)
+// Deprecated Linux interfaces, kept here for short-term compatibility
+#define kmalloc(s, t) xmalloc_bytes((s))
+#define kfree(s) xfree((s))
 
 // see common/keyhandler.c
 #define        nop()   asm volatile ("nop 0")
@@ -254,10 +251,8 @@
 #define seq_printf(a,b...) printf(b)
 #define CONFIG_BLK_DEV_INITRD // needed to reserve memory for domain0
 
-#define FORCE_CRASH()  asm("break 0;;");
-
 void dummy_called(char *function);
-#define dummy()        dummy_called(__FUNCTION__)
+#define dummy()        dummy_called((char *) __FUNCTION__)
 
 // these declarations got moved at some point, find a better place for them
 extern int ht_per_core;
@@ -295,14 +290,17 @@
 #endif /* __XEN_IA64_CONFIG_H__ */
 
 // needed for include/xen/smp.h
-#ifdef CONFIG_SMP
-#define raw_smp_processor_id() current->processor
-#else
-#define raw_smp_processor_id() 0
-#endif
+//#ifdef CONFIG_SMP
+//#define raw_smp_processor_id()       current->processor
+//#else
+//#define raw_smp_processor_id()       0
+//#endif
 
 #ifndef __ASSEMBLY__
 #include <linux/linkage.h>
+#define FORCE_CRASH()  asm("break.m 0;;");
+#else
+#define FORCE_CRASH    break.m 0;;
 #endif
 
 #endif /* _IA64_CONFIG_H_ */
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/debugger.h
--- a/xen/include/asm-ia64/debugger.h   Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/debugger.h   Thu Mar  2 10:00:49 2006
@@ -39,6 +39,8 @@
 #ifndef __ASSEMBLY__
 
 #include <xen/gdbstub.h>
+
+void show_registers(struct cpu_user_regs *regs);
 
 // NOTE: on xen struct pt_regs = struct cpu_user_regs
 //       see include/asm-ia64/linux-xen/asm/ptrace.h
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/domain.h     Thu Mar  2 10:00:49 2006
@@ -9,14 +9,15 @@
 #include <public/arch-ia64.h>
 #include <asm/vmx_platform.h>
 #include <xen/list.h>
+#include <xen/cpumask.h>
 
 extern void domain_relinquish_resources(struct domain *);
 
 struct arch_domain {
     struct mm_struct *active_mm;
     struct mm_struct *mm;
-    int metaphysical_rr0;
-    int metaphysical_rr4;
+    unsigned long metaphysical_rr0;
+    unsigned long metaphysical_rr4;
     int starting_rid;          /* first RID assigned to domain */
     int ending_rid;            /* one beyond highest RID assigned to domain */
     int rid_bits;              /* number of virtual rid bits (default: 18) */
@@ -32,11 +33,6 @@
     u64 xen_vastart;
     u64 xen_vaend;
     u64 shared_info_va;
-#ifdef DOMU_AUTO_RESTART
-    u64 image_start;
-    u64 image_len;
-    u64 entry;
-#endif
     unsigned long initrd_start;
     unsigned long initrd_len;
     char *cmdline;
@@ -63,13 +59,12 @@
        unsigned long domain_itm;
        unsigned long domain_itm_last;
        unsigned long xen_itm;
-       unsigned long xen_timer_interval;
 #endif
     mapped_regs_t *privregs; /* save the state of vcpu */
-    int metaphysical_rr0;              // from arch_domain (so is pinned)
-    int metaphysical_rr4;              // from arch_domain (so is pinned)
-    int metaphysical_saved_rr0;                // from arch_domain (so is pinned)
-    int metaphysical_saved_rr4;                // from arch_domain (so is pinned)
+    unsigned long metaphysical_rr0;            // from arch_domain (so is pinned)
+    unsigned long metaphysical_rr4;            // from arch_domain (so is pinned)
+    unsigned long metaphysical_saved_rr0;      // from arch_domain (so is pinned)
+    unsigned long metaphysical_saved_rr4;      // from arch_domain (so is pinned)
     int breakimm;                      // from arch_domain (so is pinned)
     int starting_rid;          /* first RID assigned to domain */
     int ending_rid;            /* one beyond highest RID assigned to domain */
@@ -112,6 +107,7 @@
                                                 * by mmlist_lock
                                                 */
 
+#ifndef XEN
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
        unsigned long arg_start, arg_end, env_start, env_end;
@@ -121,6 +117,7 @@
        unsigned long saved_auxv[40]; /* for /proc/PID/auxv */
 
        unsigned dumpable:1;
+#endif
 #ifdef CONFIG_HUGETLB_PAGE
        int used_hugetlb;
 #endif
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/grant_table.h
--- a/xen/include/asm-ia64/grant_table.h        Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/grant_table.h        Thu Mar  2 10:00:49 2006
@@ -17,7 +17,7 @@
 #define gnttab_shared_gmfn(d, t, i)                                     \
     ( ((d) == dom0) ?                                                   \
       ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i)) :              \
-      (map_domain_page((d), 1UL<<40, virt_to_maddr((t)->shared)),       \
+      (assign_domain_page((d), 1UL<<40, virt_to_maddr((t)->shared)),       \
        1UL << (40 - PAGE_SHIFT))                                        \
     )
 
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/hypercall.h
--- a/xen/include/asm-ia64/hypercall.h  Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/hypercall.h  Thu Mar  2 10:00:49 2006
@@ -16,15 +16,4 @@
     u64 *pdone,
     u64 foreigndom);
 
-extern int
-do_lock_page(
-    VCPU *vcpu,
-    u64 va,
-    u64 lock);
-
-extern int
-do_set_shared_page(
-    VCPU *vcpu,
-    u64 gpa);
-
 #endif /* __ASM_IA64_HYPERCALL_H__ */
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux-xen/asm/README.origin
--- a/xen/include/asm-ia64/linux-xen/asm/README.origin  Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/README.origin  Thu Mar  2 10:00:49 2006
@@ -5,6 +5,7 @@
 # (e.g. with #ifdef XEN or XEN in a comment) so that they can be
 # easily updated to future versions of the corresponding Linux files.
 
+cache.h                -> linux/include/asm-ia64/cache.h
 gcc_intrin.h           -> linux/include/asm-ia64/gcc_intrin.h
 ia64regs.h             -> linux/include/asm-ia64/ia64regs.h
 io.h                   -> linux/include/asm-ia64/io.h
@@ -16,6 +17,7 @@
 pgtable.h              -> linux/include/asm-ia64/pgtable.h
 processor.h            -> linux/include/asm-ia64/processor.h
 ptrace.h               -> linux/include/asm-ia64/ptrace.h
+smp.h                  -> linux/include/asm-ia64/smp.h
 spinlock.h             -> linux/include/asm-ia64/spinlock.h
 system.h               -> linux/include/asm-ia64/system.h
 tlbflush.h             -> linux/include/asm-ia64/tlbflush.h
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux-xen/asm/processor.h
--- a/xen/include/asm-ia64/linux-xen/asm/processor.h    Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/processor.h    Thu Mar  2 10:00:49 2006
@@ -639,6 +639,19 @@
        return r;
 }
 
+#ifdef XEN
+/* Get the page table address and control bits.  */
+static inline __u64
+ia64_get_pta (void)
+{
+   __u64 r;
+   ia64_srlz_d();
+   r = ia64_getreg(_IA64_REG_CR_PTA);
+   ia64_srlz_d();
+   return r;
+}
+#endif
+
 static inline void
 ia64_set_dbr (__u64 regnum, __u64 value)
 {
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux-xen/asm/tlbflush.h
--- a/xen/include/asm-ia64/linux-xen/asm/tlbflush.h     Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/tlbflush.h     Thu Mar  2 10:00:49 2006
@@ -103,6 +103,10 @@
         */
 }
 
+
 #define flush_tlb_kernel_range(start, end)     flush_tlb_all() /* XXX fix me */
+#ifdef XEN
+extern void flush_tlb_mask(cpumask_t mask);
+#endif
 
 #endif /* _ASM_IA64_TLBFLUSH_H */
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux-xen/linux/README.origin
--- a/xen/include/asm-ia64/linux-xen/linux/README.origin        Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/linux-xen/linux/README.origin        Thu Mar  2 10:00:49 2006
@@ -5,7 +5,6 @@
 # (e.g. with #ifdef XEN or XEN in a comment) so that they can be
 # easily updated to future versions of the corresponding Linux files.
 
-cpumask.h              -> linux/include/linux/cpumask.h
 gfp.h                  -> linux/include/linux/gfp.h
 hardirq.h              -> linux/include/linux/hardirq.h
 interrupt.h            -> linux/include/linux/interrupt.h
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux/README.origin
--- a/xen/include/asm-ia64/linux/README.origin  Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/linux/README.origin  Thu Mar  2 10:00:49 2006
@@ -4,31 +4,29 @@
 # needs to be changed, move it to ../linux-xen and follow
 # the instructions in the README there.
 
-bcd.h                  ->linux/include/linux/bcd.h
-bitmap.h               ->linux/include/linux/bitmap.h
-bitops.h               ->linux/include/linux/bitops.h
-dma-mapping.h          ->linux/include/linux/dma-mapping.h
-efi.h                  ->linux/include/linux/efi.h
-err.h                  ->linux/include/linux/err.h
-initrd.h               ->linux/include/linux/initrd.h
-jiffies.h              ->linux/include/linux/jiffies.h
-kmalloc_sizes.h                ->linux/include/linux/kmalloc_sizes.h
-linkage.h              ->linux/include/linux/linkage.h
-mmzone.h               ->linux/include/linux/mmzone.h
-notifier.h             ->linux/include/linux/notifier.h
-numa.h                 ->linux/include/linux/numa.h
-page-flags.h           ->linux/include/linux/page-flags.h
-percpu.h               ->linux/include/linux/percpu.h
-preempt.h              ->linux/include/linux/preempt.h
-rbtree.h               ->linux/include/linux/rbtree.h
-rwsem.h                        ->linux/include/linux/rwsem.h
-seqlock.h              ->linux/include/linux/seqlock.h
-slab.h                 ->linux/include/linux/slab.h
-sort.h                 ->linux/include/linux/sort.h
-stddef.h               ->linux/include/linux/stddef.h
-thread_info.h          ->linux/include/linux/thread_info.h
-threads.h              ->linux/include/linux/threads.h
-time.h                 ->linux/include/linux/time.h
-timex.h                        ->linux/include/linux/timex.h
-topology.h             ->linux/include/linux/topology.h
-wait.h                 ->linux/include/linux/wait.h
+bcd.h                  -> linux/include/linux/bcd.h
+bitmap.h               -> linux/include/linux/bitmap.h
+bitops.h               -> linux/include/linux/bitops.h
+dma-mapping.h          -> linux/include/linux/dma-mapping.h
+efi.h                  -> linux/include/linux/efi.h
+err.h                  -> linux/include/linux/err.h
+initrd.h               -> linux/include/linux/initrd.h
+jiffies.h              -> linux/include/linux/jiffies.h
+kmalloc_sizes.h                -> linux/include/linux/kmalloc_sizes.h
+linkage.h              -> linux/include/linux/linkage.h
+mmzone.h               -> linux/include/linux/mmzone.h
+notifier.h             -> linux/include/linux/notifier.h
+numa.h                 -> linux/include/linux/numa.h
+page-flags.h           -> linux/include/linux/page-flags.h
+percpu.h               -> linux/include/linux/percpu.h
+preempt.h              -> linux/include/linux/preempt.h
+rbtree.h               -> linux/include/linux/rbtree.h
+rwsem.h                        -> linux/include/linux/rwsem.h
+seqlock.h              -> linux/include/linux/seqlock.h
+sort.h                 -> linux/include/linux/sort.h
+stddef.h               -> linux/include/linux/stddef.h
+thread_info.h          -> linux/include/linux/thread_info.h
+time.h                 -> linux/include/linux/time.h
+timex.h                        -> linux/include/linux/timex.h
+topology.h             -> linux/include/linux/topology.h
+wait.h                 -> linux/include/linux/wait.h
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux/asm/README.origin
--- a/xen/include/asm-ia64/linux/asm/README.origin      Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/linux/asm/README.origin      Thu Mar  2 10:00:49 2006
@@ -4,6 +4,7 @@
 # needs to be changed, move it to ../linux-xen and follow
 # the instructions in the README there.
 
+acpi.h                 -> linux/include/asm-ia64/acpi.h
 asmmacro.h             -> linux/include/asm-ia64/asmmacro.h
 atomic.h               -> linux/include/asm-ia64/atomic.h
 bitops.h               -> linux/include/asm-ia64/bitops.h
@@ -11,7 +12,6 @@
 bug.h                  -> linux/include/asm-ia64/bug.h
 byteorder.h            -> linux/include/asm-ia64/byteorder.h
 cacheflush.h           -> linux/include/asm-ia64/cacheflush.h
-cache.h                        -> linux/include/asm-ia64/cache.h
 checksum.h             -> linux/include/asm-ia64/checksum.h
 current.h              -> linux/include/asm-ia64/current.h
 delay.h                        -> linux/include/asm-ia64/delay.h
@@ -46,9 +46,6 @@
 sections.h             -> linux/include/asm-ia64/sections.h
 semaphore.h            -> linux/include/asm-ia64/semaphore.h
 setup.h                        -> linux/include/asm-ia64/setup.h
-sigcontext.h           -> linux/include/asm-ia64/sigcontext.h
-signal.h               -> linux/include/asm-ia64/signal.h
-smp.h                  -> linux/include/asm-ia64/smp.h
 string.h               -> linux/include/asm-ia64/string.h
 thread_info.h          -> linux/include/asm-ia64/thread_info.h
 timex.h                        -> linux/include/asm-ia64/timex.h
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux/byteorder/README.origin
--- a/xen/include/asm-ia64/linux/byteorder/README.origin        Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/linux/byteorder/README.origin        Thu Mar  2 10:00:49 2006
@@ -4,6 +4,6 @@
 # needs to be changed, move it to ../linux-xen and follow
 # the instructions in the README there.
 
-generic.h              -> linux/include/byteorder/generic.h
-little_endian.h                -> linux/include/byteorder/little_endian.h
-swab.h                 -> linux/include/byteorder/swab.h
+generic.h              -> linux/include/linux/byteorder/generic.h
+little_endian.h                -> linux/include/linux/byteorder/little_endian.h
+swab.h                 -> linux/include/linux/byteorder/swab.h
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/mm.h Thu Mar  2 10:00:49 2006
@@ -133,6 +133,8 @@
 extern void __init init_frametable(void);
 #endif
 void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
+
+extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
 
 static inline void put_page(struct page_info *page)
 {
@@ -215,8 +217,8 @@
 #endif
 
 // prototype of misc memory stuff
-unsigned long __get_free_pages(unsigned int mask, unsigned int order);
-void __free_pages(struct page *page, unsigned int order);
+//unsigned long __get_free_pages(unsigned int mask, unsigned int order);
+//void __free_pages(struct page *page, unsigned int order);
 void *pgtable_quicklist_alloc(void);
 void pgtable_quicklist_free(void *pgtable_entry);
 
@@ -407,6 +409,7 @@
 extern int nr_swap_pages;
 
 extern unsigned long *mpt_table;
+extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
 extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
 #undef machine_to_phys_mapping
 #define machine_to_phys_mapping        mpt_table
@@ -435,12 +438,22 @@
 
 /* Return I/O type if true */
 #define __gpfn_is_io(_d, gpfn)                         \
-       (__gmfn_valid(_d, gpfn) ?                       \
-       (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) : 0)
+({                                          \
+    u64 pte, ret=0;                                \
+    pte=lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT));      \
+    if(!(pte&GPFN_INV_MASK))        \
+        ret = pte & GPFN_IO_MASK;        \
+    ret;                \
+})
 
 #define __gpfn_is_mem(_d, gpfn)                                \
-       (__gmfn_valid(_d, gpfn) ?                       \
-       ((lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) == GPFN_MEM) : 0)
+({                                          \
+    u64 pte, ret=0;                                \
+    pte=lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT));      \
+    if((!(pte&GPFN_INV_MASK))&&((pte & GPFN_IO_MASK)==GPFN_MEM))   \
+        ret = 1;             \
+    ret;                \
+})
 
 
 #define __gpa_to_mpa(_d, gpa)   \
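
The reworked __gpfn_is_io/__gpfn_is_mem above key off GPFN_INV_MASK in the
value returned by lookup_domain_mpa() instead of calling __gmfn_valid().
A rough inline-function equivalent of the I/O check, for readability only:

    static inline u64 gpfn_is_io_sketch(struct domain *d, unsigned long gpfn)
    {
        u64 pte = lookup_domain_mpa(d, (u64)gpfn << PAGE_SHIFT);
        if (pte & GPFN_INV_MASK)      /* no valid mapping for this gpfn */
            return 0;
        return pte & GPFN_IO_MASK;    /* non-zero => I/O space type bits */
    }
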
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/multicall.h
--- a/xen/include/asm-ia64/multicall.h  Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/multicall.h  Thu Mar  2 10:00:49 2006
@@ -1,5 +1,27 @@
 #ifndef __ASM_IA64_MULTICALL_H__
 #define __ASM_IA64_MULTICALL_H__
 
-#define do_multicall_call(_call) BUG()
+#include <public/xen.h>
+
+typedef unsigned long (*hypercall_t)(
+                       unsigned long arg0,
+                       unsigned long arg1,
+                       unsigned long arg2,
+                       unsigned long arg3,
+                       unsigned long arg4,
+                       unsigned long arg5);
+
+extern hypercall_t ia64_hypercall_table[];
+
+static inline void do_multicall_call(multicall_entry_t *call)
+{
+       call->result = (*ia64_hypercall_table[call->op])(
+                       call->args[0],
+                       call->args[1],
+                       call->args[2],
+                       call->args[3],
+                       call->args[4],
+                       call->args[5]);
+}
+
 #endif /* __ASM_IA64_MULTICALL_H__ */
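
With do_multicall_call() now dispatching through ia64_hypercall_table, the
common do_multicall() path can execute guest batches on ia64.  A purely
illustrative caller's-eye view (field names are from public/xen.h; the op
chosen here is just an example):

    multicall_entry_t batch[2];

    batch[0].op = __HYPERVISOR_xen_version;
    batch[0].args[0] = 0;
    batch[1].op = __HYPERVISOR_xen_version;
    batch[1].args[0] = 1;
    /* do_multicall() walks the array; each do_multicall_call() stores
     * the hypercall's return value in batch[i].result. */
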
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/privop.h
--- a/xen/include/asm-ia64/privop.h     Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/privop.h     Thu Mar  2 10:00:49 2006
@@ -209,4 +209,6 @@
 
 extern void privify_memory(void *start, UINT64 len);
 
+extern int ia64_hyperprivop(unsigned long iim, REGS *regs);
+
 #endif
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/vcpu.h
--- a/xen/include/asm-ia64/vcpu.h       Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/vcpu.h       Thu Mar  2 10:00:49 2006
@@ -34,11 +34,13 @@
 #endif
 
 /* general registers */
-extern UINT64 vcpu_get_gr(VCPU *vcpu, unsigned reg);
-extern IA64FAULT vcpu_get_gr_nat(VCPU *vcpu, unsigned reg, UINT64 *val);
-extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value, int nat);
+extern UINT64 vcpu_get_gr(VCPU *vcpu, unsigned long reg);
+extern IA64FAULT vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val);
+extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int nat);
 /* application registers */
+extern void vcpu_load_kernel_regs(VCPU *vcpu);
 extern IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val);
+extern IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val);
 /* psr */
 extern BOOLEAN vcpu_get_psr_ic(VCPU *vcpu);
 extern UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr);
@@ -46,6 +48,9 @@
 extern IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm);
 extern IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm);
 extern IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
+extern IA64FAULT vcpu_set_psr_i(VCPU *vcpu);
+extern IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu);
+extern IA64FAULT vcpu_set_psr_dt(VCPU *vcpu);
 /* control registers */
 extern IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val);
 extern IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val);
@@ -89,6 +94,8 @@
 extern IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval);
 extern IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval);
 /* interrupt registers */
+extern void vcpu_pend_unspecified_interrupt(VCPU *vcpu);
+extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
 extern IA64FAULT vcpu_get_itv(VCPU *vcpu,UINT64 *pval);
 extern IA64FAULT vcpu_get_pmv(VCPU *vcpu,UINT64 *pval);
 extern IA64FAULT vcpu_get_cmcv(VCPU *vcpu,UINT64 *pval);
@@ -97,8 +104,8 @@
 extern IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val);
 extern IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val);
 /* interval timer registers */
-extern IA64FAULT vcpu_set_itm(VCPU *vcpu,UINT64 val);
 extern IA64FAULT vcpu_set_itc(VCPU *vcpu,UINT64 val);
+extern UINT64 vcpu_timer_pending_early(VCPU *vcpu);
 /* debug breakpoint registers */
 extern IA64FAULT vcpu_set_ibr(VCPU *vcpu,UINT64 reg,UINT64 val);
 extern IA64FAULT vcpu_set_dbr(VCPU *vcpu,UINT64 reg,UINT64 val);
@@ -135,9 +142,14 @@
 extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
 extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha);
 extern IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
+extern IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa);
+extern IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr);
 /* misc */
 extern IA64FAULT vcpu_rfi(VCPU *vcpu);
 extern IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
+extern IA64FAULT vcpu_cover(VCPU *vcpu);
+extern IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
+extern IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval);
 
 extern void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector);
 extern void vcpu_pend_timer(VCPU *vcpu);
@@ -149,4 +161,16 @@
 extern UINT64 vcpu_get_tmp(VCPU *, UINT64);
 extern void vcpu_set_tmp(VCPU *, UINT64, UINT64);
 
+static inline UINT64
+itir_ps(UINT64 itir)
+{
+    return ((itir >> 2) & 0x3f);
+}
+
+static inline UINT64
+itir_mask(UINT64 itir)
+{
+    return (~((1UL << itir_ps(itir)) - 1));
+}
+
 #endif
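
The new itir_ps()/itir_mask() helpers pull the page-size field (bits 2..7)
out of an ITIR value and derive the matching address mask.  A quick worked
example:

    u64 itir = 14UL << 2;   /* ps = 14, i.e. a 16KB translation */
    /* itir_ps(itir)   == 14                                        */
    /* itir_mask(itir) == ~((1UL << 14) - 1) == 0xffffffffffffc000  */
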
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/vhpt.h
--- a/xen/include/asm-ia64/vhpt.h       Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/vhpt.h       Thu Mar  2 10:00:49 2006
@@ -121,6 +121,11 @@
 extern void vhpt_insert (unsigned long vadr, unsigned long ptr,
                         unsigned logps);
 extern void vhpt_flush(void);
+
+/* Currently the VHPT is allocated per CPU.  */
+DECLARE_PER_CPU (unsigned long, vhpt_paddr);
+DECLARE_PER_CPU (unsigned long, vhpt_pend);
+
 #endif /* !__ASSEMBLY */
 
 #if !VHPT_ENABLED
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h       Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/vmmu.h       Thu Mar  2 10:00:49 2006
@@ -22,13 +22,23 @@
 
 #ifndef XEN_TLBthash_H
 #define XEN_TLBthash_H
+
+#define         MAX_CCN_DEPTH           15       // collision chain depth
+#define         VCPU_VTLB_SHIFT          (20)    // 1M for VTLB
+#define         VCPU_VTLB_SIZE           (1UL<<VCPU_VTLB_SHIFT)
+#define         VCPU_VTLB_ORDER          (VCPU_VTLB_SHIFT - PAGE_SHIFT)
+#define         VCPU_VHPT_SHIFT          (24)    // 16M for VHPT
+#define         VCPU_VHPT_SIZE           (1UL<<VCPU_VHPT_SHIFT)
+#define         VCPU_VHPT_ORDER          (VCPU_VHPT_SHIFT - PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
 
 #include <xen/config.h>
 #include <xen/types.h>
 #include <public/xen.h>
 #include <asm/tlb.h>
 #include <asm/regionreg.h>
-
+#include <asm/vmx_mm_def.h>
 //#define         THASH_TLB_TR            0
 //#define         THASH_TLB_TC            1
 
@@ -39,7 +49,15 @@
 
 /*
  * Next bit definition must be same with THASH_TLB_XX
- */
+#define         PTA_BASE_SHIFT          (15)
+ */
+
+
+
+
+#define HIGH_32BITS(x)  bits(x,32,63)
+#define LOW_32BITS(x)   bits(x,0,31)
+
 typedef union search_section {
         struct {
                 u32 tr : 1;
@@ -49,15 +67,6 @@
         u32     v;
 } search_section_t;
 
-#define         MAX_CCN_DEPTH           4       // collision chain depth
-#define         VCPU_TLB_SHIFT          (22)
-#define         VCPU_TLB_SIZE           (1UL<<VCPU_TLB_SHIFT)
-#define         VCPU_TLB_ORDER          VCPU_TLB_SHIFT - PAGE_SHIFT
-#define         PTA_BASE_SHIFT          (15)
-
-#ifndef __ASSEMBLY__
-#define HIGH_32BITS(x)  bits(x,32,63)
-#define LOW_32BITS(x)   bits(x,0,31)
 
 typedef enum {
         ISIDE_TLB=0,
@@ -77,18 +86,21 @@
             u64 ppn  : 38; // 12-49
             u64 rv2  :  2; // 50-51
             u64 ed   :  1; // 52
-            u64 ig1  :  11; //53-63
+            u64 ig1  :  3; // 53-55
+            u64 len  :  4; // 56-59
+            u64 ig2  :  3; // 60-63
         };
         struct {
             u64 __rv1 : 53;    // 0-52
+            u64 contiguous : 1; //53
+            u64 tc : 1;     // 54 TR or TC
+            CACHE_LINE_TYPE cl : 1; // 55 I side or D side cache line
             // next extension to ig1, only for TLB instance
-            u64 tc : 1;     // 53 TR or TC
-            u64 locked  : 1;   // 54 entry locked or not
-            CACHE_LINE_TYPE cl : 1; // I side or D side cache line
-            u64 nomap : 1;   // entry cann't be inserted into machine TLB.
-            u64 __ig1  :  5; // 56-61
-            u64 checked : 1; // for VTLB/VHPT sanity check
-            u64 invalid : 1; // invalid entry
+            u64 __ig1  :  4; // 56-59
+            u64 locked  : 1;   // 60 entry locked or not
+            u64 nomap : 1;   // 61 entry can't be inserted into machine TLB.
+            u64 checked : 1; // 62 for VTLB/VHPT sanity check
+            u64 invalid : 1; // 63 invalid entry
         };
         u64 page_flags;
     };                  // same for VHPT and TLB
@@ -128,10 +140,37 @@
     };
 } thash_data_t;
 
+#define INVALIDATE_VHPT_HEADER(hdata)   \
+{      ((hdata)->page_flags)=0;        \
+       ((hdata)->ti)=1;        \
+       ((hdata)->next)=0; }
+
+#define INVALIDATE_TLB_HEADER(hdata)   \
+{      ((hdata)->page_flags)=0;        \
+       ((hdata)->ti)=1;                \
+       ((hdata)->next)=0; }
+
 #define INVALID_VHPT(hdata)     ((hdata)->ti)
-#define INVALID_TLB(hdata)      ((hdata)->invalid)
-#define INVALID_ENTRY(hcb, hdata)                       \
-        ((hcb)->ht==THASH_TLB ? INVALID_TLB(hdata) : INVALID_VHPT(hdata))
+#define INVALID_TLB(hdata)      ((hdata)->ti)
+#define INVALID_TR(hdata)      ((hdata)->invalid)
+#define INVALID_ENTRY(hcb, hdata)       INVALID_VHPT(hdata)
+
+/*        ((hcb)->ht==THASH_TLB ? INVALID_TLB(hdata) : INVALID_VHPT(hdata)) */
+
+
+/*
+ * The architected ppn is in 4KB units, while the Xen page
+ * size may differ (1<<PAGE_SHIFT).
+ */
+static inline u64 arch_to_xen_ppn(u64 appn)
+{
+    return (appn >>(PAGE_SHIFT-ARCH_PAGE_SHIFT));
+}
+
+static inline u64 xen_to_arch_ppn(u64 xppn)
+{
+    return (xppn <<(PAGE_SHIFT- ARCH_PAGE_SHIFT));
+}
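
A short worked example of the ppn converters above, assuming for
illustration that PAGE_SHIFT is 14 (so one Xen page spans four of the
architected 4KB units):

    /* arch_to_xen_ppn(0x10) == 0x10 >> (14 - 12) == 0x4  */
    /* xen_to_arch_ppn(0x4)  == 0x4  << (14 - 12) == 0x10 */
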
 
 typedef enum {
         THASH_TLB=0,
@@ -166,11 +205,11 @@
         struct thash_cb  *vhpt;
 } tlb_special_t;
 
-typedef struct vhpt_cb {
+//typedef struct vhpt_cb {
         //u64     pta;    // pta value.
-        GET_MFN_FN      *get_mfn;
-        TTAG_FN         *tag_func;
-} vhpt_special;
+//        GET_MFN_FN      *get_mfn;
+//        TTAG_FN         *tag_func;
+//} vhpt_special;
 
 typedef struct thash_internal {
         thash_data_t *hash_base;
@@ -198,36 +237,38 @@
         u64     hash_sz;        // size of above data.
         void    *cch_buf;       // base address of collision chain.
         u64     cch_sz;         // size of above data.
-        THASH_FN        *hash_func;
-        GET_RR_FN       *get_rr_fn;
-        RECYCLE_FN      *recycle_notifier;
+//        THASH_FN        *hash_func;
+//        GET_RR_FN       *get_rr_fn;
+//        RECYCLE_FN      *recycle_notifier;
         thash_cch_mem_t *cch_freelist;
         struct vcpu *vcpu;
         PTA     pta;
         /* VTLB/VHPT common information */
-        FIND_OVERLAP_FN *find_overlap;
-        FIND_NEXT_OVL_FN *next_overlap;
-        REM_THASH_FN    *rem_hash; // remove hash entry.
-        INS_THASH_FN    *ins_hash; // insert hash entry.
-        REM_NOTIFIER_FN *remove_notifier;
+//        FIND_OVERLAP_FN *find_overlap;
+//        FIND_NEXT_OVL_FN *next_overlap;
+//        REM_THASH_FN    *rem_hash; // remove hash entry.
+//        INS_THASH_FN    *ins_hash; // insert hash entry.
+//        REM_NOTIFIER_FN *remove_notifier;
         /* private information */
-        thash_internal_t  priv;
+//        thash_internal_t  priv;
         union {
                 tlb_special_t  *ts;
-                vhpt_special   *vs;
+//                vhpt_special   *vs;
         };
        // Internal position information, buffer and storage etc. TBD
 } thash_cb_t;
 
 #define ITR(hcb,id)             ((hcb)->ts->itr[id])
 #define DTR(hcb,id)             ((hcb)->ts->dtr[id])
-#define INVALIDATE_HASH(hcb,hash)           {   \
-           if ((hcb)->ht==THASH_TLB)            \
-             INVALID_TLB(hash) = 1;             \
-           else                                 \
-             INVALID_VHPT(hash) = 1;            \
-           hash->next = NULL; }
-
+#define INVALIDATE_HASH_HEADER(hcb,hash)    INVALIDATE_TLB_HEADER(hash)
+/*              \
+{           if ((hcb)->ht==THASH_TLB){            \
+            INVALIDATE_TLB_HEADER(hash);             \
+           }else{                                 \
+             INVALIDATE_VHPT_HEADER(hash);            \
+            }                                       \
+}
+ */
 #define PURGABLE_ENTRY(hcb,en)  1
 //             ((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
 
@@ -242,18 +283,20 @@
  *    NOTES:
  *      1: TLB entry may be TR, TC or Foreign Map. For TR entry,
  *         itr[]/dtr[] need to be updated too.
- *      2: Inserting to collision chain may trigger recycling if 
+ *      2: Inserting to collision chain may trigger recycling if
  *         the buffer for collision chain is empty.
  *      3: The new entry is inserted at the hash table.
  *         (I.e. head of the collision chain)
  *      4: Return the entry in hash table or collision chain.
  *
  */
-extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
+extern void thash_vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
+//extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
 extern void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx);
-
-/*
- * Force to delete a found entry no matter TR or foreign map for TLB. 
+extern thash_data_t *vtr_find_overlap(thash_cb_t *hcb, thash_data_t *data, char cl);
+extern u64 get_mfn(struct domain *d, u64 gpfn);
+/*
+ * Force to delete a found entry no matter TR or foreign map for TLB.
  *    NOTES:
  *      1: TLB entry may be TR, TC or Foreign Map. For TR entry,
  *         itr[]/dtr[] need to be updated too.
@@ -307,7 +350,7 @@
                         u64 rid, u64 va, u64 sz, 
                         search_section_t p_sect, 
                         CACHE_LINE_TYPE cl);
-extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in);
+extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in, u64 va);
 
 /*
  * Purge all TCs or VHPT entries including those in Hash table.
@@ -335,8 +378,11 @@
 extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
 extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va);
 extern thash_cb_t *init_domain_tlb(struct vcpu *d);
-
-#define   VTLB_DEBUG
+extern thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag);
+extern thash_data_t * vhpt_lookup(u64 va);
+extern void machine_tlb_purge(u64 va, u64 ps);
+
+//#define   VTLB_DEBUG
 #ifdef   VTLB_DEBUG
 extern void check_vtlb_sanity(thash_cb_t *vtlb);
 extern void dump_vtlb(thash_cb_t *vtlb);
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h        Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/vmx.h        Thu Mar  2 10:00:49 2006
@@ -34,7 +34,24 @@
 extern void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c);
 extern void vmx_wait_io(void);
 extern void vmx_io_assist(struct vcpu *v);
-
+extern void vmx_load_all_rr(struct vcpu *vcpu);
+extern void panic_domain(struct pt_regs *regs, const char *fmt, ...);
+extern int ia64_hypercall (struct pt_regs *regs);
+extern void vmx_save_state(struct vcpu *v);
+extern void vmx_load_state(struct vcpu *v);
+extern void show_registers(struct pt_regs *regs);
+extern int vmx_alloc_contig_pages(struct domain *d);
+extern unsigned long __gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
+extern void sync_split_caches(void);
+extern void vmx_virq_line_assist(struct vcpu *v);
+extern void set_privileged_operation_isr (struct vcpu *vcpu,int inst);
+extern void privilege_op (struct vcpu *vcpu);
+extern void set_ifa_itir_iha (struct vcpu *vcpu, u64 vadr,
+          int set_ifa, int set_itir, int set_iha);
+extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
+extern void vmx_intr_assist(struct vcpu *v);
+extern void set_illegal_op_isr (struct vcpu *vcpu);
+extern  void illegal_op (struct vcpu *vcpu);
 static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
 {
     return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/vmx_mm_def.h
--- a/xen/include/asm-ia64/vmx_mm_def.h Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/vmx_mm_def.h Thu Mar  2 10:00:49 2006
@@ -27,8 +27,6 @@
 //#define VHPT_SIZE   (1 << VHPT_SIZE_PS)
 #define ARCH_PAGE_SHIFT   12
 #define ARCH_PAGE_SIZE    PSIZE(ARCH_PAGE_SHIFT)
-#define INVALID_MFN    (-1)
-
 #define MAX_PHYS_ADDR_BITS  50
 #define PMASK(size)         (~((size) - 1))
 #define PSIZE(size)         (1UL<<(size))
@@ -36,7 +34,7 @@
 #define POFFSET(vaddr, ps)  ((vaddr) & (PSIZE(ps) - 1))
 #define PPN_2_PA(ppn)       ((ppn)<<12)
 #define CLEARLSB(ppn, nbits)    ((((uint64_t)ppn) >> (nbits)) << (nbits))
-#define PAGEALIGN(va, ps)      (va & ~(PSIZE(ps)-1))
+#define PAGEALIGN(va, ps)      CLEARLSB(va, ps)
 
 #define TLB_AR_R        0
 #define TLB_AR_RX       1
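
PAGEALIGN() is now expressed via CLEARLSB(), so both macros share the same
shift-based truncation.  For example:

    /* PAGEALIGN(0x12345, 14) == ((0x12345 >> 14) << 14) == 0x10000 */
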
@@ -86,9 +84,6 @@
 
 #define STLB_TC         0
 #define STLB_TR         1
-
-#define VMM_RR_MASK     0xfffff
-#define VMM_RR_SHIFT        20
 
 #define IA64_RR_SHIFT       61
 
@@ -145,6 +140,7 @@
     uint64_t    result;
     __asm __volatile("shl %0=%1, %2;; shr.u %0=%0, %3;;"
         : "=r" (result): "r"(v), "r"(63-be), "r" (bs+63-be) );
+    return result;
 }
 
 #define bits(val, bs, be)                                         \
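
The hunk above also fixes the inline bit-extraction helper, which computed
its shl/shr.u result but fell off the end without returning it.  A plain-C
sketch of the same field extraction, for reference:

    /* Extract bits [bs..be] of v: shift left to drop everything above
     * 'be', then shift right to drop everything below 'bs'. */
    static inline uint64_t bits_sketch(uint64_t v, int bs, int be)
    {
        return (v << (63 - be)) >> (bs + 63 - be);
    }
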
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/vmx_pal.h
--- a/xen/include/asm-ia64/vmx_pal.h    Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/vmx_pal.h    Thu Mar  2 10:00:49 2006
@@ -114,7 +114,7 @@
        PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0);
        return iprv.status;
 }
-
+extern void pal_emul(struct vcpu *vcpu);
 #define PAL_PROC_VM_BIT                (1UL << 40)
 #define PAL_PROC_VMSW_BIT      (1UL << 54)
 #endif /* _ASM_IA64_VT_PAL_H */
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/vmx_phy_mode.h
--- a/xen/include/asm-ia64/vmx_phy_mode.h       Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/vmx_phy_mode.h       Thu Mar  2 10:00:49 2006
@@ -96,6 +96,8 @@
 extern void recover_if_physical_mode(VCPU *vcpu);
 extern void vmx_init_all_rr(VCPU *vcpu);
 extern void vmx_load_all_rr(VCPU *vcpu);
+extern void physical_itlb_miss(VCPU *vcpu, u64 vadr);
+extern void physical_dtlb_miss(VCPU *vcpu, u64 vadr);
 /*
  * No sanity check here, since all psr changes have been
  * checked in switch_mm_mode().
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/vmx_platform.h
--- a/xen/include/asm-ia64/vmx_platform.h       Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/vmx_platform.h       Thu Mar  2 10:00:49 2006
@@ -54,7 +54,7 @@
 #define VCPU(_v,_x)    _v->arch.privregs->_x
 #define VLAPIC_ID(l) (uint16_t)(VCPU((l)->vcpu, lid) >> 16)
 #define VLAPIC_IRR(l) VCPU((l)->vcpu, irr[0])
-
+struct vlapic* apic_round_robin(struct domain *d, uint8_t dest_mode, uint8_t vector, uint32_t bitmap);
 extern int vmx_vcpu_pend_interrupt(struct vcpu *vcpu, uint8_t vector);
 static inline int vlapic_set_irq(struct vlapic *t, uint8_t vec, uint8_t trig)
 {
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Thu Mar  2 10:00:49 2006
@@ -51,8 +51,7 @@
 
 #define VMM_RR_SHIFT    20
 #define VMM_RR_MASK     ((1UL<<VMM_RR_SHIFT)-1)
-//#define VRID_2_MRID(vcpu,rid)  ((rid) & VMM_RR_MASK) | \
-                ((vcpu->domain->domain_id) << VMM_RR_SHIFT)
+
 extern u64 indirect_reg_igfld_MASK ( int type, int index, u64 value);
 extern u64 cr_igfld_mask (int index, u64 value);
 extern int check_indirect_reg_rsv_fields ( int type, int index, u64 value );
@@ -118,7 +117,16 @@
 extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
 extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
 extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
-
+extern void vcpu_load_kernel_regs(VCPU *vcpu);
+extern IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu);
+extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
+
+extern void dtlb_fault (VCPU *vcpu, u64 vadr);
+extern void nested_dtlb (VCPU *vcpu);
+extern void alt_dtlb (VCPU *vcpu, u64 vadr);
+extern void dvhpt_fault (VCPU *vcpu, u64 vadr);
+extern void dnat_page_consumption (VCPU *vcpu, uint64_t vadr);
+extern void page_not_present(VCPU *vcpu, u64 vadr);
 
 /**************************************************************************
  VCPU control register access routines
@@ -461,10 +469,10 @@
 vmx_vrrtomrr(VCPU *v, unsigned long val)
 {
     ia64_rr rr;
-    u64          rid;
 
     rr.rrval=val;
     rr.rid = rr.rid + v->arch.starting_rid;
+    rr.ps = PAGE_SHIFT;
     rr.ve = 1;
     return  vmMangleRID(rr.rrval);
 /* Disable this rid allocation algorithm for now */
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/xenkregs.h
--- a/xen/include/asm-ia64/xenkregs.h   Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/xenkregs.h   Thu Mar  2 10:00:49 2006
@@ -8,7 +8,8 @@
 #define        IA64_TR_VHPT            4       /* dtr4: vhpt */
 #define IA64_TR_ARCH_INFO      5
 #define IA64_TR_PERVP_VHPT     6
-
+#define IA64_DTR_GUEST_KERNEL   7
+#define IA64_ITR_GUEST_KERNEL   2
 /* Processor status register bits: */
 #define IA64_PSR_VM_BIT                46
 #define IA64_PSR_VM    (__IA64_UL(1) << IA64_PSR_VM_BIT)
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/xensystem.h
--- a/xen/include/asm-ia64/xensystem.h  Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/xensystem.h  Thu Mar  2 10:00:49 2006
@@ -78,7 +78,6 @@
 #define __cmpxchg_user(ptr, new, old, _size)                           \
 ({                                                                     \
        register long __gu_r8 asm ("r8");                               \
-       register long __gu_r9 asm ("r9");                               \
        asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));                  \
        asm volatile ("mov %1=r0;;\n"                                   \
                "[1:]\tcmpxchg"_size".acq %0=[%2],%3,ar.ccv\n"          \
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Thu Mar  2 09:59:34 2006
+++ b/xen/include/public/arch-ia64.h    Thu Mar  2 10:00:49 2006
@@ -249,7 +249,7 @@
             int interrupt_delivery_enabled; // virtual psr.i
             int pending_interruption;
             int incomplete_regframe; // see SDM vol2 6.8
-            unsigned long delivery_mask[4];
+            unsigned long reserved5_1[4];
             int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
             int banknum; // 0 or 1, which virtual register bank is active
             unsigned long rrs[8]; // region registers
diff -r 7edd64c8bb36 -r eeac4fdf02ed linux-2.6-xen-sparse/include/asm-ia64/meminit.h
--- /dev/null   Thu Mar  2 09:59:34 2006
+++ b/linux-2.6-xen-sparse/include/asm-ia64/meminit.h   Thu Mar  2 10:00:49 2006
@@ -0,0 +1,67 @@
+#ifndef meminit_h
+#define meminit_h
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/config.h>
+
+/*
+ * Entries defined so far:
+ *     - boot param structure itself
+ *     - memory map
+ *     - initrd (optional)
+ *     - command line string
+ *     - kernel code & data
+ *     - Kernel memory map built from EFI memory map
+ *     - xen start info
+ *
+ * More could be added if necessary
+ */
+#ifndef CONFIG_XEN
+#define IA64_MAX_RSVD_REGIONS 6
+#else
+#define IA64_MAX_RSVD_REGIONS 7
+#endif
+
+struct rsvd_region {
+       unsigned long start;    /* virtual address of beginning of element */
+       unsigned long end;      /* virtual address of end of element + 1 */
+};
+
+extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
+extern int num_rsvd_regions;
+
+extern void find_memory (void);
+extern void reserve_memory (void);
+extern void find_initrd (void);
+extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
+extern void efi_memmap_init(unsigned long *, unsigned long *);
+
+/*
+ * For rounding an address to the next IA64_GRANULE_SIZE or order
+ */
+#define GRANULEROUNDDOWN(n)    ((n) & ~(IA64_GRANULE_SIZE-1))
+#define GRANULEROUNDUP(n)      (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
+#define ORDERROUNDDOWN(n)      ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
+
+#ifdef CONFIG_NUMA
+  extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
+#else
+# define call_pernode_memory(start, len, func) (*func)(start, len, 0)
+#endif
+
+#define IGNORE_PFN0    1       /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define LARGE_GAP     0x40000000 /* Use virtual mem map if hole is > than this */
+  extern unsigned long vmalloc_end;
+  extern struct page *vmem_map;
+  extern int find_largest_hole (u64 start, u64 end, void *arg);
+  extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
+#endif
+
+#endif /* meminit_h */
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux-xen/asm/smp.h
--- /dev/null   Thu Mar  2 09:59:34 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/smp.h  Thu Mar  2 10:00:49 2006
@@ -0,0 +1,143 @@
+/*
+ * SMP Support
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
+ * (c) Copyright 2001-2003, 2005 Hewlett-Packard Development Company, L.P.
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *     Bjorn Helgaas <bjorn.helgaas@xxxxxx>
+ */
+#ifndef _ASM_IA64_SMP_H
+#define _ASM_IA64_SMP_H
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/cpumask.h>
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/param.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+static inline unsigned int
+ia64_get_lid (void)
+{
+       union {
+               struct {
+                       unsigned long reserved : 16;
+                       unsigned long eid : 8;
+                       unsigned long id : 8;
+                       unsigned long ignored : 32;
+               } f;
+               unsigned long bits;
+       } lid;
+
+       lid.bits = ia64_getreg(_IA64_REG_CR_LID);
+       return lid.f.id << 8 | lid.f.eid;
+}
+
+#ifdef CONFIG_SMP
+
+#define XTP_OFFSET             0x1e0008
+
+#define SMP_IRQ_REDIRECTION    (1 << 0)
+#define SMP_IPI_REDIRECTION    (1 << 1)
+
+#ifdef XEN
+#define raw_smp_processor_id() (current->processor)
+#else
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+#endif
+
+extern struct smp_boot_data {
+       int cpu_count;
+       int cpu_phys_id[NR_CPUS];
+} smp_boot_data __initdata;
+
+extern char no_int_routing __devinitdata;
+
+extern cpumask_t cpu_online_map;
+extern cpumask_t cpu_core_map[NR_CPUS];
+extern cpumask_t cpu_sibling_map[NR_CPUS];
+extern int smp_num_siblings;
+extern int smp_num_cpucores;
+extern void __iomem *ipi_base_addr;
+extern unsigned char smp_int_redirect;
+
+extern volatile int ia64_cpu_to_sapicid[];
+#define cpu_physical_id(i)     ia64_cpu_to_sapicid[i]
+
+extern unsigned long ap_wakeup_vector;
+
+/*
+ * Function to map hard smp processor id to logical id.  Slow, so don't use this in
+ * performance-critical code.
+ */
+static inline int
+cpu_logical_id (int cpuid)
+{
+       int i;
+
+       for (i = 0; i < NR_CPUS; ++i)
+               if (cpu_physical_id(i) == cpuid)
+                       break;
+       return i;
+}
+
+/*
+ * XTP control functions:
+ *     min_xtp   : route all interrupts to this CPU
+ *     normal_xtp: nominal XTP value
+ *     max_xtp   : never deliver interrupts to this CPU.
+ */
+
+static inline void
+min_xtp (void)
+{
+       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+               writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
+}
+
+static inline void
+normal_xtp (void)
+{
+       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+               writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
+}
+
+static inline void
+max_xtp (void)
+{
+       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+               writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
+}
+
+#define hard_smp_processor_id()                ia64_get_lid()
+
+/* Upping and downing of CPUs */
+extern int __cpu_disable (void);
+extern void __cpu_die (unsigned int cpu);
+extern void cpu_die (void) __attribute__ ((noreturn));
+extern int __cpu_up (unsigned int cpu);
+extern void __init smp_build_cpu_map(void);
+
+extern void __init init_smp_config (void);
+extern void smp_do_timer (struct pt_regs *regs);
+
+extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
+                                    int retry, int wait);
+extern void smp_send_reschedule (int cpu);
+extern void lock_ipi_calllock(void);
+extern void unlock_ipi_calllock(void);
+extern void identify_siblings (struct cpuinfo_ia64 *);
+
+#else
+
+#define cpu_logical_id(i)              0
+#define cpu_physical_id(i)             ia64_get_lid()
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_IA64_SMP_H */
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux-xen/linux/cpumask.h
--- a/xen/include/asm-ia64/linux-xen/linux/cpumask.h    Thu Mar  2 09:59:34 2006
+++ /dev/null   Thu Mar  2 10:00:49 2006
@@ -1,397 +0,0 @@
-#ifndef __LINUX_CPUMASK_H
-#define __LINUX_CPUMASK_H
-
-/*
- * Cpumasks provide a bitmap suitable for representing the
- * set of CPU's in a system, one bit position per CPU number.
- *
- * See detailed comments in the file linux/bitmap.h describing the
- * data type on which these cpumasks are based.
- *
- * For details of cpumask_scnprintf() and cpumask_parse(),
- * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
- * For details of cpulist_scnprintf() and cpulist_parse(), see
- * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
- *
- * The available cpumask operations are:
- *
- * void cpu_set(cpu, mask)             turn on bit 'cpu' in mask
- * void cpu_clear(cpu, mask)           turn off bit 'cpu' in mask
- * void cpus_setall(mask)              set all bits
- * void cpus_clear(mask)               clear all bits
- * int cpu_isset(cpu, mask)            true iff bit 'cpu' set in mask
- * int cpu_test_and_set(cpu, mask)     test and set bit 'cpu' in mask
- *
- * void cpus_and(dst, src1, src2)      dst = src1 & src2  [intersection]
- * void cpus_or(dst, src1, src2)       dst = src1 | src2  [union]
- * void cpus_xor(dst, src1, src2)      dst = src1 ^ src2
- * void cpus_andnot(dst, src1, src2)   dst = src1 & ~src2
- * void cpus_complement(dst, src)      dst = ~src
- *
- * int cpus_equal(mask1, mask2)                Does mask1 == mask2?
- * int cpus_intersects(mask1, mask2)   Do mask1 and mask2 intersect?
- * int cpus_subset(mask1, mask2)       Is mask1 a subset of mask2?
- * int cpus_empty(mask)                        Is mask empty (no bits sets)?
- * int cpus_full(mask)                 Is mask full (all bits sets)?
- * int cpus_weight(mask)               Hamming weigh - number of set bits
- *
- * void cpus_shift_right(dst, src, n)  Shift right
- * void cpus_shift_left(dst, src, n)   Shift left
- *
- * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
- * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
- *
- * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
- * CPU_MASK_ALL                                Initializer - all bits set
- * CPU_MASK_NONE                       Initializer - no bits set
- * unsigned long *cpus_addr(mask)      Array of unsigned long's in mask
- *
- * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
- * int cpumask_parse(ubuf, ulen, mask) Parse ascii string as cpumask
- * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
- * int cpulist_parse(buf, map)         Parse ascii string as cpulist
- *
- * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask
- *
- * int num_online_cpus()               Number of online CPUs
- * int num_possible_cpus()             Number of all possible CPUs
- * int num_present_cpus()              Number of present CPUs
- *
- * int cpu_online(cpu)                 Is some cpu online?
- * int cpu_possible(cpu)               Is some cpu possible?
- * int cpu_present(cpu)                        Is some cpu present (can schedule)?
- *
- * int any_online_cpu(mask)            First online cpu in mask
- *
- * for_each_cpu(cpu)                   for-loop cpu over cpu_possible_map
- * for_each_online_cpu(cpu)            for-loop cpu over cpu_online_map
- * for_each_present_cpu(cpu)           for-loop cpu over cpu_present_map
- *
- * Subtlety:
- * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
- *    to generate slightly worse code.  Note for example the additional
- *    40 lines of assembly code compiling the "for each possible cpu"
- *    loops buried in the disk_stat_read() macro calls when compiling
- *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
- *    one-line #define for cpu_isset(), instead of wrapping an inline
- *    inside a macro, the way we do the other calls.
- */
-
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/bitmap.h>
-#include <asm/bug.h>
-
-typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
-extern cpumask_t _unused_cpumask_arg_;
-
-#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
-static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
-{
-       set_bit(cpu, dstp->bits);
-}
-
-#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
-static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
-{
-       clear_bit(cpu, dstp->bits);
-}
-
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, int nbits)
-{
-       bitmap_fill(dstp->bits, nbits);
-}
-
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, int nbits)
-{
-       bitmap_zero(dstp->bits, nbits);
-}
-
-/* No static inline type checking - see Subtlety (1) above. */
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
-{
-       return test_and_set_bit(cpu, addr->bits);
-}
-
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_andnot(dst, src1, src2) \
-                               __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
-static inline void __cpus_complement(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int nbits)
-{
-       bitmap_complement(dstp->bits, srcp->bits, nbits);
-}
-
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_equal(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       return bitmap_equal(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_intersects(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       return bitmap_intersects(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_subset(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       return bitmap_subset(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
-{
-       return bitmap_empty(srcp->bits, nbits);
-}
-
-#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
-static inline int __cpus_full(const cpumask_t *srcp, int nbits)
-{
-       return bitmap_full(srcp->bits, nbits);
-}
-
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
-{
-       return bitmap_weight(srcp->bits, nbits);
-}
-
-#define cpus_shift_right(dst, src, n) \
-                       __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_right(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int n, int nbits)
-{
-       bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
-}
-
-#define cpus_shift_left(dst, src, n) \
-                       __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int n, int nbits)
-{
-       bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-
-#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
-static inline int __first_cpu(const cpumask_t *srcp, int nbits)
-{
-       return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
-}
-
-#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
-static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
-{
-       return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
-}
-
-#define cpumask_of_cpu(cpu)                                            \
-({                                                                     \
-       typeof(_unused_cpumask_arg_) m;                                 \
-       if (sizeof(m) == sizeof(unsigned long)) {                       \
-               m.bits[0] = 1UL<<(cpu);                                 \
-       } else {                                                        \
-               cpus_clear(m);                                          \
-               cpu_set((cpu), m);                                      \
-       }                                                               \
-       m;                                                              \
-})
-
-#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
-
-#if NR_CPUS <= BITS_PER_LONG
-
-#define CPU_MASK_ALL                                                   \
-(cpumask_t) { {                                                         \
-       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
-} }
-
-#else
-
-#define CPU_MASK_ALL                                                   \
-(cpumask_t) { {                                                         \
-       [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                        \
-       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
-} }
-
-#endif
-
-#define CPU_MASK_NONE                                                  \
-(cpumask_t) { {                                                         \
-       [0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL                         \
-} }
-
-#define CPU_MASK_CPU0                                                  \
-(cpumask_t) { {                                                         \
-       [0] =  1UL                                                      \
-} }
-
-#define cpus_addr(src) ((src).bits)
-
-#define cpumask_scnprintf(buf, len, src) \
-                       __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
-static inline int __cpumask_scnprintf(char *buf, int len,
-                                       const cpumask_t *srcp, int nbits)
-{
-       return bitmap_scnprintf(buf, len, srcp->bits, nbits);
-}
-
-#define cpumask_parse(ubuf, ulen, dst) \
-                       __cpumask_parse((ubuf), (ulen), &(dst), NR_CPUS)
-static inline int __cpumask_parse(const char __user *buf, int len,
-                                       cpumask_t *dstp, int nbits)
-{
-       return bitmap_parse(buf, len, dstp->bits, nbits);
-}
-
-#define cpulist_scnprintf(buf, len, src) \
-                       __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
-static inline int __cpulist_scnprintf(char *buf, int len,
-                                       const cpumask_t *srcp, int nbits)
-{
-       return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
-}
-
-#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
-static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
-{
-       return bitmap_parselist(buf, dstp->bits, nbits);
-}
-
-#if NR_CPUS > 1
-#define for_each_cpu_mask(cpu, mask)           \
-       for ((cpu) = first_cpu(mask);           \
-               (cpu) < NR_CPUS;                \
-               (cpu) = next_cpu((cpu), (mask)))
-#else /* NR_CPUS == 1 */
-#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
-#endif /* NR_CPUS */
-
-/*
- * The following particular system cpumasks and operations manage
- * possible, present and online cpus.  Each of them is a fixed size
- * bitmap of size NR_CPUS.
- *
- *  #ifdef CONFIG_HOTPLUG_CPU
- *     cpu_possible_map - all NR_CPUS bits set
- *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
- *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
- *  #else
- *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
- *     cpu_present_map  - copy of cpu_possible_map
- *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
- *  #endif
- *
- *  In either case, NR_CPUS is fixed at compile time, as the static
- *  size of these bitmaps.  The cpu_possible_map is fixed at boot
- *  time, as the set of CPU ids that might ever be plugged in
- *  at any time during the life of that system boot.
- *  The cpu_present_map is dynamic(*), representing which CPUs
- *  are currently plugged in.  And cpu_online_map is the dynamic
- *  subset of cpu_present_map, indicating those CPUs available
- *  for scheduling.
- *
- *  If HOTPLUG is enabled, then cpu_possible_map is forced to have
- *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
- *  ACPI reports present at boot.
- *
- *  If HOTPLUG is enabled, then cpu_present_map varies dynamically,
- *  depending on what ACPI reports as currently plugged in, otherwise
- *  cpu_present_map is just a copy of cpu_possible_map.
- *
- *  (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
- *      hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
- *
- * Subtleties:
- * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
- *    assumption that their single CPU is online.  The UP
- *    cpu_{online,possible,present}_maps are placebos.  Changing them
- *    will have no useful effect on the following num_*_cpus()
- *    and cpu_*() macros in the UP case.  This ugliness is a UP
- *    optimization - don't waste any instructions or memory references
- *    asking if you're online or how many CPUs there are if there is
- *    only one CPU.
- * 2) Most SMP arch's #define some of these maps to be some
- *    other map specific to that arch.  Therefore, the following
- *    must be #define macros, not inlines.  To see why, examine
- *    the assembly code produced by the following.  Note that
- *    set1() writes phys_x_map, but set2() writes x_map:
- *        int x_map, phys_x_map;
- *        #define set1(a) x_map = a
- *        inline void set2(int a) { x_map = a; }
- *        #define x_map phys_x_map
- *        main(){ set1(3); set2(5); }
- */
-
-extern cpumask_t cpu_possible_map;
-#ifndef XEN
-extern cpumask_t cpu_online_map;
-#endif
-extern cpumask_t cpu_present_map;
-
-#if NR_CPUS > 1
-#define num_online_cpus()      cpus_weight(cpu_online_map)
-#define num_possible_cpus()    cpus_weight(cpu_possible_map)
-#define num_present_cpus()     cpus_weight(cpu_present_map)
-#define cpu_online(cpu)                cpu_isset((cpu), cpu_online_map)
-#define cpu_possible(cpu)      cpu_isset((cpu), cpu_possible_map)
-#define cpu_present(cpu)       cpu_isset((cpu), cpu_present_map)
-#else
-#define num_online_cpus()      1
-#define num_possible_cpus()    1
-#define num_present_cpus()     1
-#define cpu_online(cpu)                ((cpu) == 0)
-#define cpu_possible(cpu)      ((cpu) == 0)
-#define cpu_present(cpu)       ((cpu) == 0)
-#endif
-
-#define any_online_cpu(mask)                   \
-({                                             \
-       int cpu;                                \
-       for_each_cpu_mask(cpu, (mask))          \
-               if (cpu_online(cpu))            \
-                       break;                  \
-       cpu;                                    \
-})
-
-#define for_each_cpu(cpu)        for_each_cpu_mask((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
-
-#endif /* __LINUX_CPUMASK_H */
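
As a reference for readers, here is a minimal sketch -- not part of the
patch -- of how the cpumask API documented in the header just removed is
typically used. count_online_in() is a hypothetical helper name, and the
code assumes a kernel build environment that provides <linux/cpumask.h>
with the operations listed above.

#include <linux/cpumask.h>

static int count_online_in(const cpumask_t *allowed)
{
        cpumask_t candidates;
        int cpu, n = 0;

        /* Intersect the caller's mask with the set of online CPUs. */
        cpus_and(candidates, *allowed, cpu_online_map);

        /* Walk every set bit; next_cpu() returns NR_CPUS when exhausted. */
        for_each_cpu_mask(cpu, candidates)
                n++;

        return n;
}
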
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux/asm/sigcontext.h
--- a/xen/include/asm-ia64/linux/asm/sigcontext.h       Thu Mar  2 09:59:34 2006
+++ /dev/null   Thu Mar  2 10:00:49 2006
@@ -1,70 +0,0 @@
-#ifndef _ASM_IA64_SIGCONTEXT_H
-#define _ASM_IA64_SIGCONTEXT_H
-
-/*
- * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
- * Copyright (C) 1998, 1999, 2001 David Mosberger-Tang <davidm@xxxxxxxxxx>
- */
-
-#include <asm/fpu.h>
-
-#define IA64_SC_FLAG_ONSTACK_BIT               0       /* is handler running on signal stack? */
-#define IA64_SC_FLAG_IN_SYSCALL_BIT            1       /* did signal interrupt a syscall? */
-#define IA64_SC_FLAG_FPH_VALID_BIT             2       /* is state in f[32]-f[127] valid? */
-
-#define IA64_SC_FLAG_ONSTACK           (1 << IA64_SC_FLAG_ONSTACK_BIT)
-#define IA64_SC_FLAG_IN_SYSCALL                (1 << IA64_SC_FLAG_IN_SYSCALL_BIT)
-#define IA64_SC_FLAG_FPH_VALID         (1 << IA64_SC_FLAG_FPH_VALID_BIT)
-
-# ifndef __ASSEMBLY__
-
-/*
- * Note on handling of register backing store: sc_ar_bsp contains the address that would
- * be found in ar.bsp after executing a "cover" instruction in the context in which the
- * signal was raised.  If signal delivery required switching to an alternate signal stack
- * (sc_rbs_base is not NULL), the "dirty" partition (as it would exist after executing the
- * imaginary "cover" instruction) is backed by the *alternate* signal stack, not the
- * original one.  In this case, sc_rbs_base contains the base address of the new register
- * backing store.  The number of registers in the dirty partition can be calculated as:
- *
- *   ndirty = ia64_rse_num_regs(sc_rbs_base, sc_rbs_base + (sc_loadrs >> 16))
- *
- */
-
-struct sigcontext {
-       unsigned long           sc_flags;       /* see manifest constants above */
-       unsigned long           sc_nat;         /* bit i == 1 iff scratch reg gr[i] is a NaT */
-       stack_t                 sc_stack;       /* previously active stack */
-
-       unsigned long           sc_ip;          /* instruction pointer */
-       unsigned long           sc_cfm;         /* current frame marker */
-       unsigned long           sc_um;          /* user mask bits */
-       unsigned long           sc_ar_rsc;      /* register stack configuration register */
-       unsigned long           sc_ar_bsp;      /* backing store pointer */
-       unsigned long           sc_ar_rnat;     /* RSE NaT collection register */
-       unsigned long           sc_ar_ccv;      /* compare and exchange compare value register */
-       unsigned long           sc_ar_unat;     /* ar.unat of interrupted context */
-       unsigned long           sc_ar_fpsr;     /* floating-point status register */
-       unsigned long           sc_ar_pfs;      /* previous function state */
-       unsigned long           sc_ar_lc;       /* loop count register */
-       unsigned long           sc_pr;          /* predicate registers */
-       unsigned long           sc_br[8];       /* branch registers */
-       /* Note: sc_gr[0] is used as the "uc_link" member of ucontext_t */
-       unsigned long           sc_gr[32];      /* general registers (static partition) */
-       struct ia64_fpreg       sc_fr[128];     /* floating-point registers */
-
-       unsigned long           sc_rbs_base;    /* NULL or new base of sighandler's rbs */
-       unsigned long           sc_loadrs;      /* see description above */
-
-       unsigned long           sc_ar25;        /* cmp8xchg16 uses this */
-       unsigned long           sc_ar26;        /* rsvd for scratch use */
-       unsigned long           sc_rsvd[12];    /* reserved for future use */
-       /*
-        * The mask must come last so we can increase _NSIG_WORDS
-        * without breaking binary compatibility.
-        */
-       sigset_t                sc_mask;        /* signal mask to restore after handler returns */
-};
-
-# endif /* __ASSEMBLY__ */
-#endif /* _ASM_IA64_SIGCONTEXT_H */
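
For readers following the sc_loadrs comment in the header just removed,
here is a hedged illustration -- not part of the patch -- of the ndirty
computation it describes. It assumes ia64_rse_num_regs() from <asm/rse.h>
and a sigcontext delivered on an alternate stack (sc_rbs_base non-NULL).

#include <asm/rse.h>

/* Number of dirty stacked registers backed by the sighandler's rbs. */
static unsigned long sc_ndirty(const struct sigcontext *sc)
{
        unsigned long *base = (unsigned long *) sc->sc_rbs_base;
        unsigned long *top  = (unsigned long *)
                ((char *) base + (sc->sc_loadrs >> 16));

        /* Mirrors the formula in the comment above. */
        return ia64_rse_num_regs(base, top);
}
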
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux/asm/signal.h
--- a/xen/include/asm-ia64/linux/asm/signal.h   Thu Mar  2 09:59:34 2006
+++ /dev/null   Thu Mar  2 10:00:49 2006
@@ -1,166 +0,0 @@
-#ifndef _ASM_IA64_SIGNAL_H
-#define _ASM_IA64_SIGNAL_H
-
-/*
- * Modified 1998-2001, 2003
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>, Hewlett-Packard Co
- *
- * Unfortunately, this file is being included by bits/signal.h in
- * glibc-2.x.  Hence the #ifdef __KERNEL__ ugliness.
- */
-
-#define SIGHUP          1
-#define SIGINT          2
-#define SIGQUIT                 3
-#define SIGILL          4
-#define SIGTRAP                 5
-#define SIGABRT                 6
-#define SIGIOT          6
-#define SIGBUS          7
-#define SIGFPE          8
-#define SIGKILL                 9
-#define SIGUSR1                10
-#define SIGSEGV                11
-#define SIGUSR2                12
-#define SIGPIPE                13
-#define SIGALRM                14
-#define SIGTERM                15
-#define SIGSTKFLT      16
-#define SIGCHLD                17
-#define SIGCONT                18
-#define SIGSTOP                19
-#define SIGTSTP                20
-#define SIGTTIN                21
-#define SIGTTOU                22
-#define SIGURG         23
-#define SIGXCPU                24
-#define SIGXFSZ                25
-#define SIGVTALRM      26
-#define SIGPROF                27
-#define SIGWINCH       28
-#define SIGIO          29
-#define SIGPOLL                SIGIO
-/*
-#define SIGLOST                29
-*/
-#define SIGPWR         30
-#define SIGSYS         31
-/* signal 31 is no longer "unused", but the SIGUNUSED macro remains for backwards compatibility */
-#define        SIGUNUSED       31
-
-/* These should not be considered constants from userland.  */
-#define SIGRTMIN       32
-#define SIGRTMAX       _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_INTERRUPT is a no-op, but left due to historical reasons.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP   0x00000001
-#define SA_NOCLDWAIT   0x00000002
-#define SA_SIGINFO     0x00000004
-#define SA_ONSTACK     0x08000000
-#define SA_RESTART     0x10000000
-#define SA_NODEFER     0x40000000
-#define SA_RESETHAND   0x80000000
-
-#define SA_NOMASK      SA_NODEFER
-#define SA_ONESHOT     SA_RESETHAND
-#define SA_INTERRUPT   0x20000000 /* dummy -- ignored */
-
-#define SA_RESTORER    0x04000000
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK     1
-#define SS_DISABLE     2
-
-/*
- * The minimum stack size needs to be fairly large because we want to
- * be sure that an app compiled for today's CPUs will continue to run
- * on all future CPU models.  The CPU model matters because the signal
- * frame needs to have space for the complete machine state, including
- * all physical stacked registers.  The number of physical stacked
- * registers is CPU model dependent, but given that the width of
- * ar.rsc.loadrs is 14 bits, we can assume that they'll never take up
- * more than 16KB of space.
- */
-#if 1
-  /*
-   * This is a stupid typo: the value was _meant_ to be 131072 (0x20000), but I typed it
-   * in wrong. ;-(  To preserve backwards compatibility, we leave the kernel at the
-   * incorrect value and fix libc only.
-   * incorrect value and fix libc only.
-   */
-# define MINSIGSTKSZ   131027  /* min. stack size for sigaltstack() */
-#else
-# define MINSIGSTKSZ   131072  /* min. stack size for sigaltstack() */
-#endif
-#define SIGSTKSZ       262144  /* default stack size for sigaltstack() */
-
-#ifdef __KERNEL__
-
-#define _NSIG          64
-#define _NSIG_BPW      64
-#define _NSIG_WORDS    (_NSIG / _NSIG_BPW)
-
-#define SA_PERCPU_IRQ          0x02000000
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/signal.h>
-
-# ifndef __ASSEMBLY__
-
-#  include <linux/types.h>
-
-/* Avoid too many header ordering problems.  */
-struct siginfo;
-
-typedef struct sigaltstack {
-       void __user *ss_sp;
-       int ss_flags;
-       size_t ss_size;
-} stack_t;
-
-#ifdef __KERNEL__
-
-/* Most things should be clean enough to redefine this at will, if care
-   is taken to make libc match.  */
-
-typedef unsigned long old_sigset_t;
-
-typedef struct {
-       unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
-struct sigaction {
-       __sighandler_t sa_handler;
-       unsigned long sa_flags;
-       sigset_t sa_mask;               /* mask last for extensibility */
-};
-
-struct k_sigaction {
-       struct sigaction sa;
-};
-
-#  include <asm/sigcontext.h>
-
-#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-
-void set_sigdelayed(pid_t pid, int signo, int code, void __user *addr);
-
-#endif /* __KERNEL__ */
-
-# endif /* !__ASSEMBLY__ */
-#endif /* _ASM_IA64_SIGNAL_H */
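
As context for the SA_* and sigaltstack constants in the header just
removed, the following userspace sketch -- again, not part of the patch --
installs a SIGSEGV handler that runs on an alternate stack, using only
standard POSIX calls.

#include <signal.h>
#include <stdlib.h>

static void on_segv(int signo)
{
        _Exit(128 + signo);             /* async-signal-safe exit */
}

int install_alt_stack_handler(void)
{
        stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
                       .ss_flags = 0,
                       .ss_size = SIGSTKSZ };
        struct sigaction sa = { .sa_handler = on_segv,
                                .sa_flags = SA_ONSTACK };  /* use the stack above */

        if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) < 0)
                return -1;

        sigemptyset(&sa.sa_mask);
        return sigaction(SIGSEGV, &sa, NULL);
}
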
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux/asm/smp.h
--- a/xen/include/asm-ia64/linux/asm/smp.h      Thu Mar  2 09:59:34 2006
+++ /dev/null   Thu Mar  2 10:00:49 2006
@@ -1,139 +0,0 @@
-/*
- * SMP Support
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@xxxxxxxxxxx>
- * (c) Copyright 2001-2003, 2005 Hewlett-Packard Development Company, L.P.
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- *     Bjorn Helgaas <bjorn.helgaas@xxxxxx>
- */
-#ifndef _ASM_IA64_SMP_H
-#define _ASM_IA64_SMP_H
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/threads.h>
-#include <linux/kernel.h>
-#include <linux/cpumask.h>
-
-#include <asm/bitops.h>
-#include <asm/io.h>
-#include <asm/param.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-
-static inline unsigned int
-ia64_get_lid (void)
-{
-       union {
-               struct {
-                       unsigned long reserved : 16;
-                       unsigned long eid : 8;
-                       unsigned long id : 8;
-                       unsigned long ignored : 32;
-               } f;
-               unsigned long bits;
-       } lid;
-
-       lid.bits = ia64_getreg(_IA64_REG_CR_LID);
-       return lid.f.id << 8 | lid.f.eid;
-}
-
-#ifdef CONFIG_SMP
-
-#define XTP_OFFSET             0x1e0008
-
-#define SMP_IRQ_REDIRECTION    (1 << 0)
-#define SMP_IPI_REDIRECTION    (1 << 1)
-
-#define raw_smp_processor_id() (current_thread_info()->cpu)
-
-extern struct smp_boot_data {
-       int cpu_count;
-       int cpu_phys_id[NR_CPUS];
-} smp_boot_data __initdata;
-
-extern char no_int_routing __devinitdata;
-
-extern cpumask_t cpu_online_map;
-extern cpumask_t cpu_core_map[NR_CPUS];
-extern cpumask_t cpu_sibling_map[NR_CPUS];
-extern int smp_num_siblings;
-extern int smp_num_cpucores;
-extern void __iomem *ipi_base_addr;
-extern unsigned char smp_int_redirect;
-
-extern volatile int ia64_cpu_to_sapicid[];
-#define cpu_physical_id(i)     ia64_cpu_to_sapicid[i]
-
-extern unsigned long ap_wakeup_vector;
-
-/*
- * Function to map hard smp processor id to logical id.  Slow, so don't use this in
- * performance-critical code.
- */
-static inline int
-cpu_logical_id (int cpuid)
-{
-       int i;
-
-       for (i = 0; i < NR_CPUS; ++i)
-               if (cpu_physical_id(i) == cpuid)
-                       break;
-       return i;
-}
-
-/*
- * XTP control functions:
- *     min_xtp   : route all interrupts to this CPU
- *     normal_xtp: nominal XTP value
- *     max_xtp   : never deliver interrupts to this CPU.
- */
-
-static inline void
-min_xtp (void)
-{
-       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
-               writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
-}
-
-static inline void
-normal_xtp (void)
-{
-       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
-               writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
-}
-
-static inline void
-max_xtp (void)
-{
-       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
-               writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
-}
-
-#define hard_smp_processor_id()                ia64_get_lid()
-
-/* Upping and downing of CPUs */
-extern int __cpu_disable (void);
-extern void __cpu_die (unsigned int cpu);
-extern void cpu_die (void) __attribute__ ((noreturn));
-extern int __cpu_up (unsigned int cpu);
-extern void __init smp_build_cpu_map(void);
-
-extern void __init init_smp_config (void);
-extern void smp_do_timer (struct pt_regs *regs);
-
-extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
-                                    int retry, int wait);
-extern void smp_send_reschedule (int cpu);
-extern void lock_ipi_calllock(void);
-extern void unlock_ipi_calllock(void);
-extern void identify_siblings (struct cpuinfo_ia64 *);
-
-#else
-
-#define cpu_logical_id(i)              0
-#define cpu_physical_id(i)             ia64_get_lid()
-
-#endif /* CONFIG_SMP */
-#endif /* _ASM_IA64_SMP_H */
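
The ia64_get_lid() helper in the header just removed decodes cr.lid with a
bitfield union. Here is a standalone toy -- not part of the patch --
showing the equivalent shift-and-mask decode (eid in bits 16-23, id in
bits 24-31, combined as id << 8 | eid).

#include <stdio.h>

static unsigned int lid_to_sapicid(unsigned long lid)
{
        unsigned long eid = (lid >> 16) & 0xff;
        unsigned long id  = (lid >> 24) & 0xff;

        return (unsigned int)(id << 8 | eid);
}

int main(void)
{
        /* 0x03020000 encodes id=3, eid=2 -> sapicid 0x0302. */
        printf("0x%x\n", lid_to_sapicid(0x03020000UL));
        return 0;
}
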
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux/slab.h
--- a/xen/include/asm-ia64/linux/slab.h Thu Mar  2 09:59:34 2006
+++ /dev/null   Thu Mar  2 10:00:49 2006
@@ -1,137 +0,0 @@
-/*
- * linux/mm/slab.h
- * Written by Mark Hemment, 1996.
- * (markhe@xxxxxxxxxxxxxxxxx)
- */
-
-#ifndef _LINUX_SLAB_H
-#define        _LINUX_SLAB_H
-
-#if    defined(__KERNEL__)
-
-typedef struct kmem_cache_s kmem_cache_t;
-
-#include       <linux/config.h>        /* kmalloc_sizes.h needs CONFIG_ options */
-#include       <linux/gfp.h>
-#include       <linux/init.h>
-#include       <linux/types.h>
-#include       <asm/page.h>            /* kmalloc_sizes.h needs PAGE_SIZE */
-#include       <asm/cache.h>           /* kmalloc_sizes.h needs L1_CACHE_BYTES */
-
-/* flags for kmem_cache_alloc() */
-#define        SLAB_NOFS               GFP_NOFS
-#define        SLAB_NOIO               GFP_NOIO
-#define        SLAB_ATOMIC             GFP_ATOMIC
-#define        SLAB_USER               GFP_USER
-#define        SLAB_KERNEL             GFP_KERNEL
-#define        SLAB_DMA                GFP_DMA
-
-#define SLAB_LEVEL_MASK                GFP_LEVEL_MASK
-
-#define        SLAB_NO_GROW            __GFP_NO_GROW   /* don't grow a cache */
-
-/* flags to pass to kmem_cache_create().
- * The first 3 are only valid when the allocator has been built with
- * SLAB_DEBUG_SUPPORT.
- */
-#define        SLAB_DEBUG_FREE         0x00000100UL    /* Perform (expensive) checks on free */
-#define        SLAB_DEBUG_INITIAL      0x00000200UL    /* Call constructor (as verifier) */
-#define        SLAB_RED_ZONE           0x00000400UL    /* Red zone objs in a cache */
-#define        SLAB_POISON             0x00000800UL    /* Poison objects */
-#define        SLAB_NO_REAP            0x00001000UL    /* never reap from the cache */
-#define        SLAB_HWCACHE_ALIGN      0x00002000UL    /* align objs on h/w cache lines */
-#define SLAB_CACHE_DMA         0x00004000UL    /* use GFP_DMA memory */
-#define SLAB_MUST_HWCACHE_ALIGN        0x00008000UL    /* force alignment */
-#define SLAB_STORE_USER                0x00010000UL    /* store the last owner for bug hunting */
-#define SLAB_RECLAIM_ACCOUNT   0x00020000UL    /* track pages allocated to indicate
-                                                  what is reclaimable later */
-#define SLAB_PANIC             0x00040000UL    /* panic if kmem_cache_create() fails */
-#define SLAB_DESTROY_BY_RCU    0x00080000UL    /* defer freeing pages to RCU */
-
-/* flags passed to a constructor func */
-#define        SLAB_CTOR_CONSTRUCTOR   0x001UL         /* if not set, then destructor */
-#define SLAB_CTOR_ATOMIC       0x002UL         /* tell constructor it can't sleep */
-#define        SLAB_CTOR_VERIFY        0x004UL         /* tell constructor it's a verify call */
-
-/* prototypes */
-extern void __init kmem_cache_init(void);
-
-extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
-                                      void (*)(void *, kmem_cache_t *, unsigned long),
-                                      void (*)(void *, kmem_cache_t *, unsigned long));
-extern int kmem_cache_destroy(kmem_cache_t *);
-extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
-extern void kmem_cache_free(kmem_cache_t *, void *);
-extern unsigned int kmem_cache_size(kmem_cache_t *);
-extern const char *kmem_cache_name(kmem_cache_t *);
-extern kmem_cache_t *kmem_find_general_cachep(size_t size, unsigned int __nocast gfpflags);
-
-/* Size description struct for general caches. */
-struct cache_sizes {
-       size_t           cs_size;
-       kmem_cache_t    *cs_cachep;
-       kmem_cache_t    *cs_dmacachep;
-};
-extern struct cache_sizes malloc_sizes[];
-extern void *__kmalloc(size_t, unsigned int __nocast);
-
-static inline void *kmalloc(size_t size, unsigned int __nocast flags)
-{
-       if (__builtin_constant_p(size)) {
-               int i = 0;
-#define CACHE(x) \
-               if (size <= x) \
-                       goto found; \
-               else \
-                       i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-               {
-                       extern void __you_cannot_kmalloc_that_much(void);
-                       __you_cannot_kmalloc_that_much();
-               }
-found:
-               return kmem_cache_alloc((flags & GFP_DMA) ?
-                       malloc_sizes[i].cs_dmacachep :
-                       malloc_sizes[i].cs_cachep, flags);
-       }
-       return __kmalloc(size, flags);
-}
-
-extern void *kcalloc(size_t, size_t, unsigned int __nocast);
-extern void kfree(const void *);
-extern unsigned int ksize(const void *);
-
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
-extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node);
-#else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
-{
-       return kmem_cache_alloc(cachep, flags);
-}
-static inline void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
-{
-       return kmalloc(size, flags);
-}
-#endif
-
-extern int FASTCALL(kmem_cache_reap(int));
-extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
-
-/* System wide caches */
-extern kmem_cache_t    *vm_area_cachep;
-extern kmem_cache_t    *names_cachep;
-extern kmem_cache_t    *files_cachep;
-extern kmem_cache_t    *filp_cachep;
-extern kmem_cache_t    *fs_cachep;
-extern kmem_cache_t    *signal_cachep;
-extern kmem_cache_t    *sighand_cachep;
-extern kmem_cache_t    *bio_cachep;
-
-extern atomic_t slab_reclaim_pages;
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_SLAB_H */
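
For reference, a kernel-style sketch -- not part of the patch -- of the
slab API declared in the header just removed. "widget" is a hypothetical
object type, and the snippet assumes the 2.6-era six-argument
kmem_cache_create() shown above.

#include <linux/slab.h>

struct widget { int id; };

static kmem_cache_t *widget_cache;

static int widget_init(void)
{
        /* No constructor/destructor; align objects to h/w cache lines. */
        widget_cache = kmem_cache_create("widget", sizeof(struct widget),
                                         0, SLAB_HWCACHE_ALIGN, NULL, NULL);
        return widget_cache ? 0 : -1;
}

static struct widget *widget_alloc(void)
{
        return kmem_cache_alloc(widget_cache, SLAB_KERNEL);  /* may sleep */
}

static void widget_free(struct widget *w)
{
        kmem_cache_free(widget_cache, w);
}
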
diff -r 7edd64c8bb36 -r eeac4fdf02ed xen/include/asm-ia64/linux/threads.h
--- a/xen/include/asm-ia64/linux/threads.h      Thu Mar  2 09:59:34 2006
+++ /dev/null   Thu Mar  2 10:00:49 2006
@@ -1,36 +0,0 @@
-#ifndef _LINUX_THREADS_H
-#define _LINUX_THREADS_H
-
-#include <linux/config.h>
-
-/*
- * The default limit for the nr of threads is now in
- * /proc/sys/kernel/threads-max.
- */
-
-/*
- * Maximum supported processors that can run under SMP.  This value is
- * set via configure setting.  The maximum is equal to the size of the
- * bitmasks used on that platform, i.e. 32 or 64.  Setting this smaller
- * saves quite a bit of memory.
- */
-#ifdef CONFIG_SMP
-#define NR_CPUS                CONFIG_NR_CPUS
-#else
-#define NR_CPUS                1
-#endif
-
-#define MIN_THREADS_LEFT_FOR_ROOT 4
-
-/*
- * This controls the default maximum pid allocated to a process
- */
-#define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000)
-
-/*
- * A maximum of 4 million PIDs should be enough for a while:
- */
-#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \
-       (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT))
-
-#endif
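
A standalone check -- not part of the patch -- of the PID_MAX arithmetic
above, evaluated for the common !CONFIG_BASE_SMALL case.

#include <stdio.h>

int main(void)
{
        long pid_max_default = 0x8000;                   /* 32768 */
        long pid_max_limit   = sizeof(long) > 4
                             ? 4L * 1024 * 1024          /* 4194304 on 64-bit */
                             : pid_max_default;

        printf("default=%ld limit=%ld\n", pid_max_default, pid_max_limit);
        return 0;
}
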

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

