[Xen-changelog] First step to remove CONFIG_VTI, toward finally supporting xen0+xenU+xenVTI at runtime. This changeset mainly addresses common code such as domain creation and RID allocation policy, including:



# HG changeset patch
# User fred@xxxxxxxxxxxxxxxxxxxxx
# Node ID 1ec2225aa8c696ca4e96e0fc27b4eafe36a9633f
# Parent  97675c2dbb40f914a3b891e83fd5d7a40590e8b2
First step to remove CONFIG_VTI, toward finally supporting xen0+xenU+xenVTI
at runtime. This changeset mainly addresses common code such as domain
creation and RID allocation policy, including:

- Boot-time VTI feature detection (see the sketch after the sign-off)
- Unified arch_do_createdomain, new_thread, arch_set_info_guest, and
construct_dom0. The function-level CONFIG_VTIs have been removed, with
several specific lines still protected by CONFIG_VTI. As more features are
cleaned up later, these lines will be freed gradually.
- Use the same RID allocation policy, including for physical-mode emulation
- Remove the duplicated rr_t definition.

Verified to break nothing. ;-)

Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
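
The end state this series works toward replaces compile-time #ifdef
CONFIG_VTI branching with one boot-time probe plus runtime checks. Below is
a minimal, self-contained C sketch of that pattern; identify_vmx_feature,
vmx_enabled and VGCF_VMX_GUEST mirror names in the patch, while the stub
types, the probe body and main() are invented for illustration only.

#include <stdio.h>

/* Stand-ins for the real Xen types and constants: illustrative only. */
#define VGCF_VMX_GUEST  (1 << 0)
#define EINVAL          22

struct vcpu_guest_context { unsigned long flags; };
struct vcpu { int vcpu_id; };

static int vmx_enabled;    /* set once at boot, consulted at runtime */

/* Boot-time probe: the real identify_vmx_feature() (called from early
 * setup in setup.c) checks whether the CPU implements VT-i; this stub
 * just pretends it does. */
static void identify_vmx_feature(void)
{
        vmx_enabled = 1;
}

/* Runtime branch that replaces the old compile-time #ifdef CONFIG_VTI:
 * one binary serves xen0/xenU domains and, if the hardware allows,
 * VTI domains as well. */
static int set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
{
        (void)v;   /* the real code initialises per-vcpu state here */

        if (c->flags & VGCF_VMX_GUEST) {
                if (!vmx_enabled)
                        return -EINVAL;   /* VTI domain on non-VT-i host */
                /* vmx-specific setup would go here (vmx_setup_platform
                 * in the patch below) */
        }
        /* common, CONFIG_VTI-free path for every domain type */
        return 0;
}

int main(void)
{
        struct vcpu v = { 0 };
        struct vcpu_guest_context c = { VGCF_VMX_GUEST };

        identify_vmx_feature();
        printf("set_info_guest -> %d\n", set_info_guest(&v, &c));
        return 0;
}

The point is only that the decision moves from the preprocessor to a flag
set once during boot, which is exactly what the hunks below begin to do.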

diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile    Sat Aug 20 04:45:43 2005
+++ b/xen/arch/ia64/Makefile    Sat Aug 20 05:19:39 2005
@@ -14,8 +14,11 @@
        irq_ia64.o irq_lsapic.o vhpt.o xenasm.o hyperprivop.o dom_fw.o \
        grant_table.o sn_console.o
 
+# TMP holder to contain *.o files moved out of CONFIG_VTI
+OBJS += vmx_init.o
+
 ifeq ($(CONFIG_VTI),y)
-OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
+OBJS += vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\
        vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
        vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o vmx_support.o pal_emul.o
 endif
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/arch/ia64/domain.c
--- a/xen/arch/ia64/domain.c    Sat Aug 20 04:45:43 2005
+++ b/xen/arch/ia64/domain.c    Sat Aug 20 05:19:39 2005
@@ -38,25 +38,17 @@
 
 #include <asm/vcpu.h>   /* for function declarations */
 #include <public/arch-ia64.h>
-#ifdef CONFIG_VTI
 #include <asm/vmx.h>
 #include <asm/vmx_vcpu.h>
 #include <asm/vmx_vpd.h>
 #include <asm/pal.h>
 #include <public/io/ioreq.h>
-#endif // CONFIG_VTI
 
 #define CONFIG_DOMAIN0_CONTIGUOUS
 unsigned long dom0_start = -1L;
-#ifdef CONFIG_VTI
 unsigned long dom0_size = 512*1024*1024; //FIXME: Should be configurable
 //FIXME: alignment should be 256MB, lest Linux use a 256MB page size
 unsigned long dom0_align = 256*1024*1024;
-#else // CONFIG_VTI
-unsigned long dom0_size = 512*1024*1024; //FIXME: Should be configurable
-//FIXME: alignment should be 256MB, lest Linux use a 256MB page size
-unsigned long dom0_align = 64*1024*1024;
-#endif // CONFIG_VTI
 #ifdef DOMU_BUILD_STAGING
 unsigned long domU_staging_size = 32*1024*1024; //FIXME: Should be configurable
 unsigned long domU_staging_start;
@@ -187,60 +179,6 @@
        memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
 }
 
-#ifdef CONFIG_VTI
-void arch_do_createdomain(struct vcpu *v)
-{
-       struct domain *d = v->domain;
-       struct thread_info *ti = alloc_thread_info(v);
-
-       /* Clear thread_info to clear some important fields, like preempt_count */
-       memset(ti, 0, sizeof(struct thread_info));
-       init_switch_stack(v);
-
-       /* Shared info area is required to be allocated at domain
-        * creation, since control panel will write some I/O info
-        * between front end and back end to that area. However for
-        * vmx domain, our design is to let domain itself to allcoate
-        * shared info area, to keep machine page contiguous. So this
-        * page will be released later when domainN issues request
-        * after up.
-        */
-       d->shared_info = (void *)alloc_xenheap_page();
-       /* Now assume all vcpu info and event indicators can be
-        * held in one shared page. Definitely later we need to
-        * consider more about it
-        */
-
-       memset(d->shared_info, 0, PAGE_SIZE);
-       d->shared_info->vcpu_data[v->vcpu_id].arch.privregs = 
-                       alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
-       printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[0].arch.privregs);
-       memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0, PAGE_SIZE);
-       v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
-       /* Mask all events, and specific port will be unmasked
-        * when customer subscribes to it.
-        */
-       if(v == d->vcpu[0]) {
-           memset(&d->shared_info->evtchn_mask[0], 0xff,
-               sizeof(d->shared_info->evtchn_mask));
-       }
-
-       /* Allocate per-domain vTLB and vhpt */
-       v->arch.vtlb = init_domain_tlb(v);
-
-       /* Physical->machine page table will be allocated when 
-        * final setup, since we have no the maximum pfn number in 
-        * this stage
-        */
-
-       /* FIXME: This is identity mapped address for xenheap. 
-        * Do we need it at all?
-        */
-       d->xen_vastart = XEN_START_ADDR;
-       d->xen_vaend = XEN_END_ADDR;
-       d->arch.breakimm = 0x1000;
-}
-#else // CONFIG_VTI
 void arch_do_createdomain(struct vcpu *v)
 {
        struct domain *d = v->domain;
@@ -263,11 +201,26 @@
        v->vcpu_info = &(d->shared_info->vcpu_data[0]);
 
        d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
-       if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
+
+#ifdef CONFIG_VTI
+       /* Per-domain vTLB and VHPT implementation. For now a vmx domain
+        * sticks to this solution. Maybe it can be deferred until we know
+        * the domain was created as a vmx one. */
+       v->arch.vtlb = init_domain_tlb(v);
+#endif
+
+       /* We may also need an emulation RID for region 4, though a guest
+        * is unlikely to issue uncacheable accesses in metaphysical mode.
+        * But keeping such info here may be saner.
+        */
+       if (((d->arch.metaphysical_rr0 = allocate_metaphysical_rr()) == -1UL)
+        || ((d->arch.metaphysical_rr4 = allocate_metaphysical_rr()) == -1UL))
                BUG();
        VCPU(v, metaphysical_mode) = 1;
        v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
+       v->arch.metaphysical_rr4 = d->arch.metaphysical_rr4;
        v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
+       v->arch.metaphysical_saved_rr4 = d->arch.metaphysical_rr4;
 #define DOMAIN_RID_BITS_DEFAULT 18
        if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
                BUG();
@@ -292,7 +245,6 @@
                return -ENOMEM;
        }
 }
-#endif // CONFIG_VTI
 
 void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
 {
@@ -312,16 +264,28 @@
        c->shared = v->domain->shared_info->arch;
 }
 
-#ifndef CONFIG_VTI
 int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
 {
        struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+       struct domain *d = v->domain;
+       int i, rc, ret;
+       unsigned long progress = 0;
 
        printf("arch_set_info_guest\n");
+       if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+            return 0;
+
+       if (c->flags & VGCF_VMX_GUEST) {
+           if (!vmx_enabled) {
+               printk("No VMX hardware feature for vmx domain.\n");
+               return -EINVAL;
+           }
+
+           vmx_setup_platform(v, c);
+       }
+
        *regs = c->regs;
-       regs->cr_ipsr = IA64_PSR_IT|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IC|IA64_PSR_I|IA64_PSR_DFH|IA64_PSR_BN|IA64_PSR_SP|IA64_PSR_DI;
-       regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
-       regs->ar_rsc |= (2 << 2); /* force PL2/3 */
+       new_thread(v, regs->cr_iip, 0, 0);
 
        v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
        if ( c->vcpu.privregs && copy_from_user(v->vcpu_info->arch.privregs,
@@ -330,100 +294,13 @@
            return -EFAULT;
        }
 
-       init_all_rr(v);
-
-       // this should be in userspace
-       regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=tty0 console=tty0 root=/dev/hda1",256L);  //FIXME
        v->arch.domain_itm_last = -1L;
-       VCPU(v, banknum) = 1;
-       VCPU(v, metaphysical_mode) = 1;
-
-       v->domain->shared_info->arch = c->shared;
+       d->shared_info->arch = c->shared;
+
+       /* Don't redo final setup */
+       set_bit(_VCPUF_initialised, &v->vcpu_flags);
        return 0;
 }
-#else // CONFIG_VTI
-int arch_set_info_guest(
-    struct vcpu *v, struct vcpu_guest_context *c)
-{
-    struct domain *d = v->domain;
-    int i, rc, ret;
-    unsigned long progress = 0;
-    shared_iopage_t *sp;
-
-    if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
-        return 0;
-
-    /* Lazy FP not implemented yet */
-    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
-    if ( c->flags & VGCF_FPU_VALID )
-        set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
-
-    /* Sync d/i cache conservatively, after domain N is loaded */
-    ret = ia64_pal_cache_flush(3, 0, &progress, NULL);
-    if (ret != PAL_STATUS_SUCCESS)
-            panic("PAL CACHE FLUSH failed for dom[%d].\n",
-               v->domain->domain_id);
-    DPRINTK("Sync i/d cache for dom%d image SUCC\n",
-               v->domain->domain_id);
-
-    /* Physical mode emulation initialization, including
-     * emulation ID allcation and related memory request
-     */
-    physical_mode_init(v);
-
-    /* FIXME: only support PMT table continuously by far */
-    d->arch.pmt = __va(c->pt_base);
-    d->arch.max_pfn = c->pt_max_pfn;
-    d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
-    sp = get_sp(d);
-    memset((char *)sp,0,PAGE_SIZE);
-    /* FIXME: temp due to old CP */
-    sp->sp_global.eport = 2;
-#ifdef V_IOSAPIC_READY
-    sp->vcpu_number = 1;
-#endif
-    /* TEMP */
-    d->arch.vmx_platform.pib_base = 0xfee00000UL;
-    
-
-    if (c->flags & VGCF_VMX_GUEST) {
-       if (!vmx_enabled)
-           panic("No VMX hardware feature for vmx domain.\n");
-
-       vmx_final_setup_domain(d);
-
-       /* One more step to enable interrupt assist */
-       set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
-    }
-
-    vlsapic_reset(v);
-    vtm_init(v);
-
-    /* Only open one port for I/O and interrupt emulation */
-    if (v == d->vcpu[0]) {
-       memset(&d->shared_info->evtchn_mask[0], 0xff,
-               sizeof(d->shared_info->evtchn_mask));
-       clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
-    }
-    /* Setup domain context. Actually IA-64 is a bit different with
-     * x86, with almost all system resources better managed by HV
-     * directly. CP only needs to provide start IP of guest, which
-     * ideally is the load address of guest Firmware.
-     */
-    new_thread(v, c->guest_iip, 0, 0);
-
-
-    d->xen_vastart = XEN_START_ADDR;
-    d->xen_vaend = XEN_END_ADDR;
-    d->arch.breakimm = 0x1000 + d->domain_id;
-    v->arch._thread.on_ustack = 0;
-
-    /* Don't redo final setup */
-    set_bit(_VCPUF_initialised, &v->vcpu_flags);
-
-    return 0;
-}
-#endif // CONFIG_VTI
 
 void arch_do_boot_vcpu(struct vcpu *v)
 {
@@ -443,7 +320,8 @@
        printf("domain_relinquish_resources: not implemented\n");
 }
 
-#ifdef CONFIG_VTI
+// heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
+// and linux/arch/ia64/kernel/process.c:kernel_thread()
 void new_thread(struct vcpu *v,
                 unsigned long start_pc,
                 unsigned long start_stack,
@@ -453,7 +331,6 @@
        struct pt_regs *regs;
        struct ia64_boot_param *bp;
        extern char saved_command_line[];
-       //char *dom0_cmdline = "BOOT_IMAGE=scsi0:\EFI\redhat\xenlinux nomca root=/dev/sdb1 ro";
 
 
 #ifdef CONFIG_DOMAIN0_CONTIGUOUS
@@ -471,61 +348,31 @@
                regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
        }
        regs->cr_iip = start_pc;
-       regs->cr_ifs = 0; /* why? - matthewc */
+       regs->cr_ifs = 1UL << 63; /* or clear? */
        regs->ar_fpsr = FPSR_DEFAULT;
+
        if (VMX_DOMAIN(v)) {
+#ifdef CONFIG_VTI
                vmx_init_all_rr(v);
-       } else
-               init_all_rr(v);
-
-       if (VMX_DOMAIN(v)) {
-               if (d == dom0) {
+               if (d == dom0)
                    VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
-                   printk("new_thread, done with dom_fw_setup\n");
-               }
                /* Virtual processor context setup */
                VMX_VPD(v, vpsr) = IA64_PSR_BN;
                VPD_CR(v, dcr) = 0;
+#endif
        } else {
-               regs->r28 = dom_fw_setup(d,saved_command_line,256L);
+               init_all_rr(v);
+               if (d == dom0) 
+                   regs->r28 = dom_fw_setup(d,saved_command_line,256L);
+               else {
+                   regs->ar_rsc |= (2 << 2); /* force PL2/3 */
+                   regs->r28 = dom_fw_setup(d,"nomca nosmp xencons=tty0 console=tty0 root=/dev/hda1",256L);  //FIXME
+               }
                VCPU(v, banknum) = 1;
                VCPU(v, metaphysical_mode) = 1;
                d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
        }
 }
-#else // CONFIG_VTI
-
-// heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
-// and linux/arch/ia64/kernel/process.c:kernel_thread()
-void new_thread(struct vcpu *v,
-                   unsigned long start_pc,
-                   unsigned long start_stack,
-                   unsigned long start_info)
-{
-       struct domain *d = v->domain;
-       struct pt_regs *regs;
-       struct ia64_boot_param *bp;
-       extern char saved_command_line[];
-
-#ifdef CONFIG_DOMAIN0_CONTIGUOUS
-       if (d == dom0) start_pc += dom0_start;
-#endif
-
-       regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
-       regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
-               | IA64_PSR_BITS_TO_SET | IA64_PSR_BN
-               & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
-       regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
-       regs->cr_iip = start_pc;
-       regs->cr_ifs = 1UL << 63;
-       regs->ar_fpsr = FPSR_DEFAULT;
-       init_all_rr(v);
-       regs->r28 = dom_fw_setup(d,saved_command_line,256L);  //FIXME
-       VCPU(v, banknum) = 1;
-       VCPU(v, metaphysical_mode) = 1;
-       d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
-}
-#endif // CONFIG_VTI
 
 static struct page * map_new_domain0_page(unsigned long mpaddr)
 {
@@ -903,44 +750,6 @@
 }
 #endif
 
-#ifdef CONFIG_VTI
-/* Up to whether domain is vmx one, different context may be setup
- * here.
- */
-void
-post_arch_do_create_domain(struct vcpu *v, int vmx_domain)
-{
-    struct domain *d = v->domain;
-
-    if (!vmx_domain) {
-       d->shared_info = (void*)alloc_xenheap_page();
-       if (!d->shared_info)
-               panic("Allocate share info for non-vmx domain failed.\n");
-       d->shared_info_va = 0xfffd000000000000;
-
-       printk("Build shared info for non-vmx domain\n");
-       build_shared_info(d);
-       /* Setup start info area */
-    }
-}
-
-/* For VMX domain, this is invoked when kernel model in domain
- * request actively
- */
-void build_shared_info(struct domain *d)
-{
-    int i;
-
-    /* Set up shared-info area. */
-    update_dom_time(d);
-
-    /* Mask all upcalls... */
-    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
-        d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
-
-    /* ... */
-}
-
 /*
  * Domain 0 has direct access to all devices absolutely. However
  * the major point of this stub here, is to allow alloc_dom_mem
@@ -959,182 +768,12 @@
                       unsigned long initrd_start, unsigned long initrd_len,
                       char *cmdline)
 {
-    char *dst;
-    int i, rc;
-    unsigned long pfn, mfn;
-    unsigned long nr_pt_pages;
-    unsigned long count;
-    unsigned long alloc_start, alloc_end;
-    struct pfn_info *page = NULL;
-    start_info_t *si;
-    struct vcpu *v = d->vcpu[0];
-    struct domain_setup_info dsi;
-    unsigned long p_start;
-    unsigned long pkern_start;
-    unsigned long pkern_entry;
-    unsigned long pkern_end;
-    unsigned long ret;
-    unsigned long progress = 0;
-
-//printf("construct_dom0: starting\n");
-    /* Sanity! */
-#ifndef CLONE_DOMAIN0
-    if ( d != dom0 ) 
-        BUG();
-    if ( test_bit(_DOMF_constructed, &d->domain_flags) ) 
-        BUG();
-#endif
-
-    printk("##Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
-    memset(&dsi, 0, sizeof(struct domain_setup_info));
-
-    printk("*** LOADING DOMAIN 0 ***\n");
-
-    alloc_start = dom0_start;
-    alloc_end = dom0_start + dom0_size;
-    d->tot_pages = d->max_pages = (alloc_end - alloc_start)/PAGE_SIZE;
-    image_start = __va(ia64_boot_param->initrd_start);
-    image_len = ia64_boot_param->initrd_size;
-
-    dsi.image_addr = (unsigned long)image_start;
-    dsi.image_len  = image_len;
-    rc = parseelfimage(&dsi);
-    if ( rc != 0 )
-        return rc;
-
-    /* Temp workaround */
-    if (running_on_sim)
-       dsi.xen_section_string = (char *)1;
-
-    if ((!vmx_enabled) && !dsi.xen_section_string) {
-       printk("Lack of hardware support for unmodified vmx dom0\n");
-       panic("");
-    }
-
-    if (vmx_enabled && !dsi.xen_section_string) {
-       printk("Dom0 is vmx domain!\n");
-       vmx_dom0 = 1;
-    }
-
-    p_start = dsi.v_start;
-    pkern_start = dsi.v_kernstart;
-    pkern_end = dsi.v_kernend;
-    pkern_entry = dsi.v_kernentry;
-
-    printk("p_start=%lx, pkern_start=%lx, pkern_end=%lx, pkern_entry=%lx\n",
-       p_start,pkern_start,pkern_end,pkern_entry);
-
-    if ( (p_start & (PAGE_SIZE-1)) != 0 )
-    {
-        printk("Initial guest OS must load to a page boundary.\n");
-        return -EINVAL;
-    }
-
-    printk("METAPHYSICAL MEMORY ARRANGEMENT:\n"
-           " Kernel image:  %lx->%lx\n"
-           " Entry address: %lx\n"
-           " Init. ramdisk:   (NOT IMPLEMENTED YET)\n",
-           pkern_start, pkern_end, pkern_entry);
-
-    if ( (pkern_end - pkern_start) > (d->max_pages * PAGE_SIZE) )
-    {
-        printk("Initial guest OS requires too much space\n"
-               "(%luMB is greater than %luMB limit)\n",
-               (pkern_end-pkern_start)>>20, (d->max_pages<<PAGE_SHIFT)>>20);
-        return -ENOMEM;
-    }
-
-    // Other sanity check about Dom0 image
-
-    /* Construct a frame-allocation list for the initial domain, since these
-     * pages are allocated by boot allocator and pfns are not set properly
-     */
-    for ( mfn = (alloc_start>>PAGE_SHIFT); 
-          mfn < (alloc_end>>PAGE_SHIFT); 
-          mfn++ )
-    {
-        page = &frame_table[mfn];
-        page_set_owner(page, d);
-        page->u.inuse.type_info = 0;
-        page->count_info        = PGC_allocated | 1;
-        list_add_tail(&page->list, &d->page_list);
-
-       /* Construct 1:1 mapping */
-       machine_to_phys_mapping[mfn] = mfn;
-    }
-
-    post_arch_do_create_domain(v, vmx_dom0);
-
-    /* Load Dom0 image to its own memory */
-    loaddomainelfimage(d,image_start);
-
-    /* Copy the initial ramdisk. */
-
-    /* Sync d/i cache conservatively */
-    ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
-    if (ret != PAL_STATUS_SUCCESS)
-            panic("PAL CACHE FLUSH failed for dom0.\n");
-    printk("Sync i/d cache for dom0 image SUCC\n");
-
-    /* Physical mode emulation initialization, including
-     * emulation ID allcation and related memory request
-     */
-    physical_mode_init(v);
-    /* Dom0's pfn is equal to mfn, so there's no need to allocate pmt
-     * for dom0
-     */
-    d->arch.pmt = NULL;
-
-    /* Give up the VGA console if DOM0 is configured to grab it. */
-    if (cmdline != NULL)
-       console_endboot(strstr(cmdline, "tty0") != NULL);
-
-    /* VMX specific construction for Dom0, if hardware supports VMX
-     * and Dom0 is unmodified image
-     */
-    printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
-    if (vmx_dom0)
-       vmx_final_setup_domain(dom0);
-    
-    /* vpd is ready now */
-    vlsapic_reset(v);
-    vtm_init(v);
-
-    set_bit(_DOMF_constructed, &d->domain_flags);
-    new_thread(v, pkern_entry, 0, 0);
-
-    physdev_init_dom0(d);
-    // FIXME: Hack for keyboard input
-#ifdef CLONE_DOMAIN0
-if (d == dom0)
-#endif
-    serial_input_init();
-    if (d == dom0) {
-       VCPU(v, delivery_mask[0]) = -1L;
-       VCPU(v, delivery_mask[1]) = -1L;
-       VCPU(v, delivery_mask[2]) = -1L;
-       VCPU(v, delivery_mask[3]) = -1L;
-    }
-    else __set_bit(0x30,VCPU(v, delivery_mask));
-
-    return 0;
-}
-
-
-#else //CONFIG_VTI
-
-int construct_dom0(struct domain *d, 
-                      unsigned long image_start, unsigned long image_len, 
-                      unsigned long initrd_start, unsigned long initrd_len,
-                      char *cmdline)
-{
        char *dst;
        int i, rc;
        unsigned long pfn, mfn;
        unsigned long nr_pt_pages;
        unsigned long count;
-       //l2_pgentry_t *l2tab, *l2start;
-       //l1_pgentry_t *l1tab = NULL, *l1start = NULL;
+       unsigned long alloc_start, alloc_end;
        struct pfn_info *page = NULL;
        start_info_t *si;
        struct vcpu *v = d->vcpu[0];
@@ -1144,6 +783,7 @@
        unsigned long pkern_start;
        unsigned long pkern_entry;
        unsigned long pkern_end;
+       unsigned long ret, progress = 0;
 
 //printf("construct_dom0: starting\n");
        /* Sanity! */
@@ -1158,7 +798,9 @@
 
        printk("*** LOADING DOMAIN 0 ***\n");
 
-       d->max_pages = dom0_size/PAGE_SIZE;
+       alloc_start = dom0_start;
+       alloc_end = dom0_start + dom0_size;
+       d->tot_pages = d->max_pages = dom0_size/PAGE_SIZE;
        image_start = __va(ia64_boot_param->initrd_start);
        image_len = ia64_boot_param->initrd_size;
 //printk("image_start=%lx, image_len=%lx\n",image_start,image_len);
@@ -1171,6 +813,23 @@
        if ( rc != 0 )
            return rc;
 
+#ifdef CONFIG_VTI
+       /* Temp workaround */
+       if (running_on_sim)
+           dsi.xen_section_string = (char *)1;
+
+       /* Check whether dom0 is a VTI domain */
+       if ((!vmx_enabled) && !dsi.xen_section_string) {
+           printk("Lack of hardware support for unmodified vmx dom0\n");
+           panic("");
+       }
+
+       if (vmx_enabled && !dsi.xen_section_string) {
+           printk("Dom0 is vmx domain!\n");
+           vmx_dom0 = 1;
+       }
+#endif
+
        p_start = dsi.v_start;
        pkern_start = dsi.v_kernstart;
        pkern_end = dsi.v_kernend;
@@ -1214,13 +873,42 @@
        for ( i = 0; i < MAX_VIRT_CPUS; i++ )
            d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
 
+#ifdef CONFIG_VTI
+       /* Construct a frame-allocation list for the initial domain, since these
+        * pages are allocated by boot allocator and pfns are not set properly
+        */
+       for ( mfn = (alloc_start>>PAGE_SHIFT); 
+             mfn < (alloc_end>>PAGE_SHIFT); 
+             mfn++ )
+       {
+            page = &frame_table[mfn];
+            page_set_owner(page, d);
+            page->u.inuse.type_info = 0;
+            page->count_info        = PGC_allocated | 1;
+            list_add_tail(&page->list, &d->page_list);
+
+           /* Construct 1:1 mapping */
+           machine_to_phys_mapping[mfn] = mfn;
+       }
+
+       /* Dom0's pfn is equal to mfn, so there's no need to allocate pmt
+        * for dom0
+        */
+       d->arch.pmt = NULL;
+#endif
+
        /* Copy the OS image. */
-       //(void)loadelfimage(image_start);
        loaddomainelfimage(d,image_start);
 
        /* Copy the initial ramdisk. */
        //if ( initrd_len != 0 )
        //    memcpy((void *)vinitrd_start, initrd_start, initrd_len);
+
+       /* Sync d/i cache conservatively */
+       ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
+       if (ret != PAL_STATUS_SUCCESS)
+           panic("PAL CACHE FLUSH failed for dom0.\n");
+       printk("Sync i/d cache for dom0 image SUCC\n");
 
 #if 0
        /* Set up start info area. */
@@ -1257,14 +945,21 @@
 #endif
        
        /* Give up the VGA console if DOM0 is configured to grab it. */
-#ifdef IA64
        if (cmdline != NULL)
-#endif
-       console_endboot(strstr(cmdline, "tty0") != NULL);
+           console_endboot(strstr(cmdline, "tty0") != NULL);
+
+       /* VMX specific construction for Dom0, if hardware supports VMX
+        * and Dom0 is unmodified image
+        */
+       printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
+       if (vmx_dom0)
+           vmx_final_setup_domain(dom0);
 
        set_bit(_DOMF_constructed, &d->domain_flags);
 
        new_thread(v, pkern_entry, 0, 0);
+       physdev_init_dom0(d);
+
        // FIXME: Hack for keyboard input
 #ifdef CLONE_DOMAIN0
 if (d == dom0)
@@ -1280,7 +975,6 @@
 
        return 0;
 }
-#endif // CONFIG_VTI
 
 // FIXME: When dom0 can construct domains, this goes away (or is rewritten)
 int construct_domU(struct domain *d,
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/arch/ia64/linux-xen/setup.c
--- a/xen/arch/ia64/linux-xen/setup.c   Sat Aug 20 04:45:43 2005
+++ b/xen/arch/ia64/linux-xen/setup.c   Sat Aug 20 05:19:39 2005
@@ -51,9 +51,7 @@
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
-#ifdef CONFIG_VTI
 #include <asm/vmx.h>
-#endif // CONFIG_VTI
 #include <asm/io.h>
 
 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
@@ -402,9 +400,9 @@
        cpu_physical_id(0) = hard_smp_processor_id();
 #endif
 
-#ifdef CONFIG_VTI
+#ifdef XEN
        identify_vmx_feature();
-#endif // CONFIG_VTI
+#endif
 
        cpu_init();     /* initialize the bootstrap CPU */
 
@@ -600,7 +598,7 @@
        c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
        c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
 
-#ifdef CONFIG_VTI
+#ifdef XEN
        /* If vmx feature is on, do necessary initialization for vmx */
        if (vmx_enabled)
                vmx_init_env();
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/arch/ia64/regionreg.c
--- a/xen/arch/ia64/regionreg.c Sat Aug 20 04:45:43 2005
+++ b/xen/arch/ia64/regionreg.c Sat Aug 20 05:19:39 2005
@@ -29,9 +29,6 @@
 #define        MAX_RID_BLOCKS  (1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))
 #define RIDS_PER_RIDBLOCK MIN_RIDS
 
-// This is the one global memory representation of the default Xen region reg
-ia64_rr xen_rr;
-
 #if 0
 // following already defined in include/asm-ia64/gcc_intrin.h
 // it should probably be ifdef'd out from there to ensure all region
@@ -65,7 +62,7 @@
 
 
 // returns -1 if none available
-unsigned long allocate_metaphysical_rr0(void)
+unsigned long allocate_metaphysical_rr(void)
 {
        ia64_rr rrv;
 
@@ -79,17 +76,6 @@
 {
        // fix this when the increment allocation mechanism is fixed.
        return 1;
-}
-
-
-void init_rr(void)
-{
-       xen_rr.rrval = 0;
-       xen_rr.ve = 0;
-       xen_rr.rid = allocate_reserved_rid();
-       xen_rr.ps = PAGE_SHIFT;
-
-       printf("initialized xen_rr.rid=0x%lx\n", xen_rr.rid);
 }
 
 /*************************************
@@ -186,34 +172,6 @@
        return 1;
 }
 
-
-// This function is purely for performance... apparently scrambling
-//  bits in the region id makes for better hashing, which means better
-//  use of the VHPT, which means better performance
-// Note that the only time a RID should be mangled is when it is stored in
-//  a region register; anytime it is "viewable" outside of this module,
-//  it should be unmangled
-
-// NOTE: this function is also implemented in assembly code in hyper_set_rr!!
-// Must ensure these two remain consistent!
-static inline unsigned long
-vmMangleRID(unsigned long RIDVal)
-{
-       union bits64 { unsigned char bytes[4]; unsigned long uint; };
-
-       union bits64 t;
-       unsigned char tmp;
-
-       t.uint = RIDVal;
-       tmp = t.bytes[1];
-       t.bytes[1] = t.bytes[3];
-       t.bytes[3] = tmp;
-
-       return t.uint;
-}
-
-// since vmMangleRID is symmetric, use it for unmangling also
-#define vmUnmangleRID(x)       vmMangleRID(x)
 
 static inline void
 set_rr_no_srlz(unsigned long rr, unsigned long rrval)
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/arch/ia64/vcpu.c
--- a/xen/arch/ia64/vcpu.c      Sat Aug 20 04:45:43 2005
+++ b/xen/arch/ia64/vcpu.c      Sat Aug 20 05:19:39 2005
@@ -14,9 +14,7 @@
 #include <asm/tlb.h>
 #include <asm/processor.h>
 #include <asm/delay.h>
-#ifdef CONFIG_VTI
 #include <asm/vmx_vcpu.h>
-#endif // CONFIG_VTI
 
 typedef        union {
        struct ia64_psr ia64_psr;
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/arch/ia64/vmmu.c
--- a/xen/arch/ia64/vmmu.c      Sat Aug 20 04:45:43 2005
+++ b/xen/arch/ia64/vmmu.c      Sat Aug 20 05:19:39 2005
@@ -81,10 +81,10 @@
 /*
  * The VRN bits of va stand for which rr to get.
  */
-rr_t vmmu_get_rr(VCPU *vcpu, u64 va)
-{
-    rr_t   vrr;
-    vmx_vcpu_get_rr(vcpu, va, &vrr.value);
+ia64_rr vmmu_get_rr(VCPU *vcpu, u64 va)
+{
+    ia64_rr   vrr;
+    vmx_vcpu_get_rr(vcpu, va, &vrr.rrval);
     return vrr;
 }
 
@@ -240,7 +240,7 @@
     u64     saved_itir, saved_ifa, saved_rr;
     u64     pages;
     thash_data_t    mtlb;
-    rr_t    vrr;
+    ia64_rr vrr;
     unsigned int    cl = tlb->cl;
 
     mtlb.ifa = tlb->vadr;
@@ -264,7 +264,7 @@
     /* Only access memory stack which is mapped by TR,
      * after rr is switched.
      */
-    ia64_set_rr(mtlb.ifa, vmx_vrrtomrr(d, vrr.value));
+    ia64_set_rr(mtlb.ifa, vmx_vrrtomrr(d, vrr.rrval));
     ia64_srlz_d();
     if ( cl == ISIDE_TLB ) {
         ia64_itci(mtlb.page_flags);
@@ -287,12 +287,12 @@
     u64     hash_addr, tag;
     unsigned long psr;
     struct vcpu *v = current;
-    rr_t    vrr;
+    ia64_rr vrr;
 
     
     saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
     saved_rr0 = ia64_get_rr(0);
-    vrr.value = saved_rr0;
+    vrr.rrval = saved_rr0;
     vrr.rid = rid;
     vrr.ps = ps;
 
@@ -300,7 +300,7 @@
     // TODO: Set to enforce lazy mode
     local_irq_save(psr);
     ia64_setreg(_IA64_REG_CR_PTA, pta.val);
-    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.value));
+    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.rrval));
     ia64_srlz_d();
 
     hash_addr = ia64_thash(va);
@@ -318,19 +318,19 @@
     u64     hash_addr, tag;
     u64     psr;
     struct vcpu *v = current;
-    rr_t    vrr;
+    ia64_rr vrr;
 
     // TODO: Set to enforce lazy mode    
     saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
     saved_rr0 = ia64_get_rr(0);
-    vrr.value = saved_rr0;
+    vrr.rrval = saved_rr0;
     vrr.rid = rid;
     vrr.ps = ps;
 
     va = (va << 3) >> 3;    // set VRN to 0.
     local_irq_save(psr);
     ia64_setreg(_IA64_REG_CR_PTA, pta.val);
-    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.value));
+    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.rrval));
     ia64_srlz_d();
 
     tag = ia64_ttag(va);
@@ -354,15 +354,15 @@
 {
     u64       saved_rr0;
     u64       psr;
-    rr_t      vrr;
+    ia64_rr vrr;
 
     va = (va << 3) >> 3;    // set VRN to 0.
     saved_rr0 = ia64_get_rr(0);
-    vrr.value = saved_rr0;
+    vrr.rrval = saved_rr0;
     vrr.rid = rid;
     vrr.ps = ps;
     local_irq_save(psr);
-    ia64_set_rr( 0, vmx_vrrtomrr(current,vrr.value) );
+    ia64_set_rr( 0, vmx_vrrtomrr(current,vrr.rrval) );
     ia64_srlz_d();
     ia64_ptcl(va, ps << 2);
     ia64_set_rr( 0, saved_rr0 );
@@ -421,14 +421,14 @@
     u64     gpip;   // guest physical IP
     u64     mpa;
     thash_data_t    *tlb;
-    rr_t    vrr;
+    ia64_rr vrr;
     u64     mfn;
     
     if ( !(VMX_VPD(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
         gpip = gip;
     }
     else {
-        vmx_vcpu_get_rr(vcpu, gip, &vrr.value);
+        vmx_vcpu_get_rr(vcpu, gip, &vrr.rrval);
         tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu), 
                 vrr.rid, gip, ISIDE_TLB );
         if ( tlb == NULL ) panic("No entry found in ITLB\n");
@@ -448,7 +448,7 @@
     thash_data_t data, *ovl;
     thash_cb_t  *hcb;
     search_section_t sections;
-    rr_t    vrr;
+    ia64_rr vrr;
 
     hcb = vmx_vcpu_get_vtlb(vcpu);
     data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
@@ -481,7 +481,7 @@
     thash_data_t data, *ovl;
     thash_cb_t  *hcb;
     search_section_t sections;
-    rr_t    vrr;
+    ia64_rr vrr;
 
     hcb = vmx_vcpu_get_vtlb(vcpu);
     data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
@@ -511,7 +511,7 @@
 {
 
     thash_cb_t  *hcb;
-    rr_t  vrr;
+    ia64_rr vrr;
     u64          preferred_size;
 
     vmx_vcpu_get_rr(vcpu, va, &vrr);
@@ -527,7 +527,7 @@
     thash_data_t data, *ovl;
     thash_cb_t  *hcb;
     search_section_t sections;
-    rr_t    vrr;
+    ia64_rr vrr;
 
     hcb = vmx_vcpu_get_vtlb(vcpu);
     data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
@@ -559,7 +559,7 @@
     thash_data_t data, *ovl;
     thash_cb_t  *hcb;
     search_section_t sections;
-    rr_t    vrr;
+    ia64_rr    vrr;
 
 
     hcb = vmx_vcpu_get_vtlb(vcpu);
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/arch/ia64/vmx_init.c
--- a/xen/arch/ia64/vmx_init.c  Sat Aug 20 04:45:43 2005
+++ b/xen/arch/ia64/vmx_init.c  Sat Aug 20 05:19:39 2005
@@ -22,6 +22,9 @@
  */
 
 /*
+ * 05/08/16 Kun Tian (Kevin Tian) <kevin.tian@xxxxxxxxx>:
+ * Disable double mapping
+ *
  * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@xxxxxxxxx>:
  * Simplied design in first step:
  *     - One virtual environment
@@ -39,6 +42,7 @@
 #include <xen/lib.h>
 #include <asm/vmmu.h>
 #include <public/arch-ia64.h>
+#include <public/io/ioreq.h>
 #include <asm/vmx_phy_mode.h>
 #include <asm/processor.h>
 #include <asm/vmx.h>
@@ -126,8 +130,43 @@
        else
                ASSERT(tmp_base != __vsa_base);
 
+#ifdef XEN_DBL_MAPPING
        /* Init stub for rr7 switch */
        vmx_init_double_mapping_stub();
+#endif 
+}
+
+void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c)
+{
+       struct domain *d = v->domain;
+       shared_iopage_t *sp;
+
+       ASSERT(d != dom0); /* only for non-privileged vti domain */
+       d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
+       sp = get_sp(d);
+       memset((char *)sp,0,PAGE_SIZE);
+       /* FIXME: temp due to old CP */
+       sp->sp_global.eport = 2;
+#ifdef V_IOSAPIC_READY
+       sp->vcpu_number = 1;
+#endif
+       /* TEMP */
+       d->arch.vmx_platform.pib_base = 0xfee00000UL;
+
+       /* One more step to enable interrupt assist */
+       set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
+       /* Only open one port for I/O and interrupt emulation */
+       if (v == d->vcpu[0]) {
+           memset(&d->shared_info->evtchn_mask[0], 0xff,
+               sizeof(d->shared_info->evtchn_mask));
+           clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
+       }
+
+       /* FIXME: only a physically contiguous PMT table is supported so far */
+       d->arch.pmt = __va(c->pt_base);
+       d->arch.max_pfn = c->pt_max_pfn;
+
+       vmx_final_setup_domain(d);
 }
 
 typedef union {
@@ -171,7 +210,7 @@
 }
 
 
-
+#ifdef CONFIG_VTI
 /*
  * Create a VP on intialized VMX environment.
  */
@@ -190,6 +229,7 @@
                panic("ia64_pal_vp_create failed. \n");
 }
 
+#ifdef XEN_DBL_MAPPING
 void vmx_init_double_mapping_stub(void)
 {
        u64 base, psr;
@@ -206,6 +246,7 @@
        ia64_srlz_i();
        printk("Add TR mapping for rr7 switch stub, with physical: 0x%lx\n", 
(u64)(__pa(base)));
 }
+#endif
 
 /* Other non-context related tasks can be done in context switch */
 void
@@ -219,12 +260,14 @@
        if (status != PAL_STATUS_SUCCESS)
                panic("Save vp status failed\n");
 
+#ifdef XEN_DBL_MAPPING
        /* FIXME: Do we really need purge double mapping for old vcpu?
         * Since rid is completely different between prev and next,
         * it's not overlap and thus no MCA possible... */
        dom_rr7 = vmx_vrrtomrr(v, VMX(v, vrr[7]));
         vmx_purge_double_mapping(dom_rr7, KERNEL_START,
                                 (u64)v->arch.vtlb->ts->vhpt->hash);
+#endif
 
        /* Need to save KR when domain switch, though HV itself doesn;t
         * use them.
@@ -252,12 +295,14 @@
        if (status != PAL_STATUS_SUCCESS)
                panic("Restore vp status failed\n");
 
+#ifdef XEN_DBL_MAPPING
        dom_rr7 = vmx_vrrtomrr(v, VMX(v, vrr[7]));
        pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL));
        pte_vhpt = pte_val(pfn_pte((__pa(v->arch.vtlb->ts->vhpt->hash) >> PAGE_SHIFT), PAGE_KERNEL));
        vmx_insert_double_mapping(dom_rr7, KERNEL_START,
                                  (u64)v->arch.vtlb->ts->vhpt->hash,
                                  pte_xen, pte_vhpt);
+#endif
 
        ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
        ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
@@ -271,6 +316,7 @@
         * anchored in vcpu */
 }
 
+#ifdef XEN_DBL_MAPPING
 /* Purge old double mapping and insert new one, due to rr7 change */
 void
 vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7)
@@ -287,6 +333,8 @@
                                  vhpt_base,
                                  pte_xen, pte_vhpt);
 }
+#endif // XEN_DBL_MAPPING
+#endif // CONFIG_VTI
 
 /*
  * Initialize VMX envirenment for guest. Only the 1st vp/vcpu
@@ -307,12 +355,21 @@
        v->arch.arch_vmx.vpd = vpd;
        vpd->virt_env_vaddr = vm_buffer;
 
+#ifdef CONFIG_VTI
        /* v->arch.schedule_tail = arch_vmx_do_launch; */
        vmx_create_vp(v);
 
        /* Set this ed to be vmx */
        set_bit(ARCH_VMX_VMCS_LOADED, &v->arch.arch_vmx.flags);
 
+       /* Physical mode emulation initialization, including
+       * emulation ID allocation and related memory request
+       */
+       physical_mode_init(v);
+
+       vlsapic_reset(v);
+       vtm_init(v);
+#endif
+
        /* Other vmx specific initialization work */
 }
-
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/arch/ia64/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx_phy_mode.c      Sat Aug 20 04:45:43 2005
+++ b/xen/arch/ia64/vmx_phy_mode.c      Sat Aug 20 05:19:39 2005
@@ -104,22 +104,8 @@
     UINT64 psr;
     struct domain * d = vcpu->domain;
 
-    vcpu->domain->arch.emul_phy_rr0.rid = XEN_RR7_RID+((d->domain_id)<<3);
-    /* FIXME */
-#if 0
-    vcpu->domain->arch.emul_phy_rr0.ps = 28;  /* set page size to 256M */
-#endif
-       vcpu->domain->arch.emul_phy_rr0.ps = EMUL_PHY_PAGE_SHIFT;  /* set page size to 4k */
-    vcpu->domain->arch.emul_phy_rr0.ve = 1; /* enable VHPT walker on this region */
-
-    vcpu->domain->arch.emul_phy_rr4.rid = XEN_RR7_RID + ((d->domain_id)<<3) + 4;
-    vcpu->domain->arch.emul_phy_rr4.ps = EMUL_PHY_PAGE_SHIFT;  /* set page size to 4k */
-    vcpu->domain->arch.emul_phy_rr4.ve = 1; /* enable VHPT walker on this region */
-
     vcpu->arch.old_rsc = 0;
     vcpu->arch.mode_flags = GUEST_IN_PHY;
-
-    return;
 }
 
 extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
@@ -246,8 +232,12 @@
 vmx_load_all_rr(VCPU *vcpu)
 {
        unsigned long psr;
+       ia64_rr phy_rr;
 
        psr = ia64_clear_ic();
+
+       phy_rr.ps = EMUL_PHY_PAGE_SHIFT; 
+       phy_rr.ve = 1;
 
        /* WARNING: not allow co-exist of both virtual mode and physical
         * mode in same region
@@ -255,10 +245,10 @@
        if (is_physical_mode(vcpu)) {
                if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
                        panic("Unexpected domain switch in phy emul\n");
-               ia64_set_rr((VRN0 << VRN_SHIFT),
-                            vcpu->domain->arch.emul_phy_rr0.rrval);
-               ia64_set_rr((VRN4 << VRN_SHIFT),
-                            vcpu->domain->arch.emul_phy_rr4.rrval);
+               phy_rr.rid = vcpu->domain->arch.metaphysical_rr0;
+               ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
+               phy_rr.rid = vcpu->domain->arch.metaphysical_rr4;
+               ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
        } else {
                ia64_set_rr((VRN0 << VRN_SHIFT),
                             vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
@@ -284,13 +274,18 @@
 switch_to_physical_rid(VCPU *vcpu)
 {
     UINT64 psr;
+    ia64_rr phy_rr;
+
+    phy_rr.ps = EMUL_PHY_PAGE_SHIFT; 
+    phy_rr.ve = 1;
 
     /* Save original virtual mode rr[0] and rr[4] */
-
     psr=ia64_clear_ic();
-    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->domain->arch.emul_phy_rr0.rrval);
+    phy_rr.rid = vcpu->domain->arch.metaphysical_rr0;
+    ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
     ia64_srlz_d();
-    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->domain->arch.emul_phy_rr4.rrval);
+    phy_rr.rid = vcpu->domain->arch.metaphysical_rr4;
+    ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
     ia64_srlz_d();
 
     ia64_set_psr(psr);
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/arch/ia64/vmx_vcpu.c
--- a/xen/arch/ia64/vmx_vcpu.c  Sat Aug 20 04:45:43 2005
+++ b/xen/arch/ia64/vmx_vcpu.c  Sat Aug 20 05:19:39 2005
@@ -234,9 +234,11 @@
     case VRN7:
         VMX(vcpu,mrr7)=vmx_vrrtomrr(vcpu,val);
         /* Change double mapping for this domain */
+#ifdef XEN_DBL_MAPPING
         vmx_change_double_mapping(vcpu,
                       vmx_vrrtomrr(vcpu,oldrr.rrval),
                       vmx_vrrtomrr(vcpu,newrr.rrval));
+#endif
         break;
     default:
         ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/arch/ia64/vtlb.c
--- a/xen/arch/ia64/vtlb.c      Sat Aug 20 04:45:43 2005
+++ b/xen/arch/ia64/vtlb.c      Sat Aug 20 05:19:39 2005
@@ -283,7 +283,7 @@
             thash_data_t *vhpt)
 {
     u64 pages,mfn;
-    rr_t vrr;
+    ia64_rr vrr;
 
     ASSERT ( hcb->ht == THASH_VHPT );
     vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
@@ -361,7 +361,7 @@
 {
     thash_data_t    *hash_table, *cch;
     int flag;
-    rr_t  vrr;
+    ia64_rr vrr;
     u64 gppn;
     u64 ppns, ppne;
     
@@ -397,7 +397,7 @@
 static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 {
     thash_data_t    *hash_table, *cch;
-    rr_t  vrr;
+    ia64_rr vrr;
     
     hash_table = (hcb->hash_func)(hcb->pta,
                         va, entry->rid, entry->ps);
@@ -425,7 +425,7 @@
 void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 {
     thash_data_t    *hash_table;
-    rr_t  vrr;
+    ia64_rr vrr;
     
     vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
     if ( entry->ps != vrr.ps && entry->tc ) {
@@ -556,7 +556,7 @@
     thash_data_t    *hash_table;
     thash_internal_t *priv = &hcb->priv;
     u64     tag;
-    rr_t    vrr;
+    ia64_rr vrr;
 
     priv->_curva = va & ~(size-1);
     priv->_eva = priv->_curva + size;
@@ -580,7 +580,7 @@
     thash_data_t    *hash_table;
     thash_internal_t *priv = &hcb->priv;
     u64     tag;
-    rr_t    vrr;
+    ia64_rr vrr;
 
     priv->_curva = va & ~(size-1);
     priv->_eva = priv->_curva + size;
@@ -633,7 +633,7 @@
     thash_data_t    *ovl;
     thash_internal_t *priv = &hcb->priv;
     u64 addr,rr_psize;
-    rr_t  vrr;
+    ia64_rr vrr;
 
     if ( priv->s_sect.tr ) {
         ovl = vtr_find_next_overlap (hcb);
@@ -665,7 +665,7 @@
     thash_data_t    *ovl;
     thash_internal_t *priv = &hcb->priv;
     u64 addr,rr_psize;
-    rr_t  vrr;
+    ia64_rr vrr;
 
     vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
     rr_psize = PSIZE(vrr.ps);
@@ -800,7 +800,7 @@
 {
     thash_data_t    *hash_table, *cch;
     u64     tag;
-    rr_t    vrr;
+    ia64_rr vrr;
    
     ASSERT ( hcb->ht == THASH_VTLB );
     
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/arch/ia64/xenmem.c
--- a/xen/arch/ia64/xenmem.c    Sat Aug 20 04:45:43 2005
+++ b/xen/arch/ia64/xenmem.c    Sat Aug 20 05:19:39 2005
@@ -30,8 +30,8 @@
  */
 #ifdef CONFIG_VTI
 unsigned long *mpt_table;
-unsigned long *mpt_table_size;
-#endif
+unsigned long mpt_table_size;
+#endif // CONFIG_VTI
 
 void
 paging_init (void)
@@ -53,21 +53,6 @@
 
        printk("machine to physical table: 0x%lx\n", (u64)mpt_table);
        memset(mpt_table, INVALID_M2P_ENTRY, mpt_table_size);
-
-       /* Any more setup here? On VMX enabled platform,
-        * there's no need to keep guest linear pg table,
-        * and read only mpt table. MAP cache is not used
-        * in this stage, and later it will be in region 5.
-        * IO remap is in region 6 with identity mapping.
-        */
-       /* HV_tlb_init(); */
-
-#else // CONFIG_VTI
-
-       /* Allocate and map the machine-to-phys table */
-       if ((pg = alloc_domheap_pages(NULL, 10, 0)) == NULL)
-               panic("Not enough memory to bootstrap Xen.\n");
-       memset(page_to_virt(pg), 0x55, 16UL << 20);
 #endif // CONFIG_VTI
 
        /* Other mapping setup */
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/arch/ia64/xensetup.c
--- a/xen/arch/ia64/xensetup.c  Sat Aug 20 04:45:43 2005
+++ b/xen/arch/ia64/xensetup.c  Sat Aug 20 05:19:39 2005
@@ -181,11 +181,6 @@
     printk("xen image pstart: 0x%lx, xenheap pend: 0x%lx\n",
            xen_pstart, xenheap_phys_end);
 
-#ifdef CONFIG_VTI
-    /* If we want to enable vhpt for all regions, related initialization
-     * for HV TLB must be done earlier before first TLB miss
-     */
-#endif // CONFIG_VTI
     /* Find next hole */
     firsthole_start = 0;
     efi_memmap_walk(xen_find_first_hole, &firsthole_start);
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Sat Aug 20 04:45:43 2005
+++ b/xen/include/asm-ia64/domain.h     Sat Aug 20 05:19:39 2005
@@ -3,39 +3,28 @@
 
 #include <linux/thread_info.h>
 #include <asm/tlb.h>
-#ifdef CONFIG_VTI
 #include <asm/vmx_vpd.h>
 #include <asm/vmmu.h>
 #include <asm/regionreg.h>
 #include <public/arch-ia64.h>
 #include <asm/vmx_platform.h>
-#endif // CONFIG_VTI
 #include <xen/list.h>
 
 extern void arch_do_createdomain(struct vcpu *);
 
 extern void domain_relinquish_resources(struct domain *);
 
-#ifdef CONFIG_VTI
-struct trap_bounce {
-       // TO add, FIXME Eddie
-};
-
-#define         PMT_SIZE       (32L*1024*1024)         // 32M for PMT
-#endif // CONFIG_VTI
-
 struct arch_domain {
     struct mm_struct *active_mm;
     struct mm_struct *mm;
     int metaphysical_rr0;
+    int metaphysical_rr4;
     int starting_rid;          /* first RID assigned to domain */
     int ending_rid;            /* one beyond highest RID assigned to domain */
     int rid_bits;              /* number of virtual rid bits (default: 18) */
     int breakimm;
-#ifdef  CONFIG_VTI
+
     int imp_va_msb;
-    ia64_rr emul_phy_rr0;
-    ia64_rr emul_phy_rr4;
     unsigned long *pmt;        /* physical to machine table */
     /*
      * max_pfn is the maximum page frame in guest physical space, including
@@ -44,7 +33,7 @@
      */
     unsigned long max_pfn;
     struct virutal_platform_def     vmx_platform;
-#endif  //CONFIG_VTI
+
     u64 xen_vastart;
     u64 xen_vaend;
     u64 shared_info_va;
@@ -78,15 +67,15 @@
 #endif
     void *regs;        /* temporary until find a better way to do privops */
     int metaphysical_rr0;              // from arch_domain (so is pinned)
+    int metaphysical_rr4;              // from arch_domain (so is pinned)
    int metaphysical_saved_rr0;                // from arch_domain (so is pinned)
+    int metaphysical_saved_rr4;                // from arch_domain (so is pinned)
     int breakimm;                      // from arch_domain (so is pinned)
     int starting_rid;          /* first RID assigned to domain */
     int ending_rid;            /* one beyond highest RID assigned to domain */
     struct mm_struct *active_mm;
     struct thread_struct _thread;      // this must be last
-#ifdef CONFIG_VTI
-    void (*schedule_tail) (struct vcpu *);
-    struct trap_bounce trap_bounce;
+
     thash_cb_t *vtlb;
     char irq_new_pending;
     char irq_new_condition;    // vpsr.i/vtpr change, check for pending VHPI
@@ -94,9 +83,7 @@
     //for phycial  emulation
     unsigned long old_rsc;
     int mode_flags;
-
     struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
-#endif // CONFIG_VTI
 };
 
 #define active_mm arch.active_mm
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/include/asm-ia64/linux-xen/asm/pal.h
--- a/xen/include/asm-ia64/linux-xen/asm/pal.h  Sat Aug 20 04:45:43 2005
+++ b/xen/include/asm-ia64/linux-xen/asm/pal.h  Sat Aug 20 05:19:39 2005
@@ -1559,9 +1559,7 @@
        return iprv.status;
 }
 
-#ifdef CONFIG_VTI
 #include <asm/vmx_pal.h>
-#endif // CONFIG_VTI
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_IA64_PAL_H */
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/include/asm-ia64/mmu_context.h
--- a/xen/include/asm-ia64/mmu_context.h        Sat Aug 20 04:45:43 2005
+++ b/xen/include/asm-ia64/mmu_context.h        Sat Aug 20 05:19:39 2005
@@ -2,11 +2,7 @@
 #define __ASM_MMU_CONTEXT_H
 //dummy file to resolve non-arch-indep include
 #ifdef XEN
-#ifndef CONFIG_VTI
 #define IA64_REGION_ID_KERNEL 0
-#else // CONFIG_VTI
-#define IA64_REGION_ID_KERNEL 0x1e0000 /* Start from all 1 in highest 4 bits */
-#endif // CONFIG_VTI
 #define ia64_rid(ctx,addr)     (((ctx) << 3) | (addr >> 61))
 
 #ifndef __ASSEMBLY__
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/include/asm-ia64/privop.h
--- a/xen/include/asm-ia64/privop.h     Sat Aug 20 04:45:43 2005
+++ b/xen/include/asm-ia64/privop.h     Sat Aug 20 05:19:39 2005
@@ -133,7 +133,6 @@
    struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6, x3:3, un1:1, major:4; };
 } INST64_M46;
 
-#ifdef CONFIG_VTI
 typedef union U_INST64_M47 {
     IA64_INST inst;
     struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
@@ -168,8 +167,6 @@
     IA64_INST inst;
    struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
 } INST64_M6;
-
-#endif // CONFIG_VTI
 
 typedef union U_INST64 {
     IA64_INST inst;
@@ -182,14 +179,12 @@
     INST64_I26 I26;    // mov register to ar (I unit)
     INST64_I27 I27;    // mov immediate to ar (I unit)
     INST64_I28 I28;    // mov from ar (I unit)
-#ifdef CONFIG_VTI
-    INST64_M1  M1;  // ld integer
+    INST64_M1  M1;     // ld integer
     INST64_M2  M2;
     INST64_M3  M3;
-    INST64_M4  M4;  // st integer
+    INST64_M4  M4;     // st integer
     INST64_M5  M5;
-    INST64_M6  M6;  // ldfd floating pointer
-#endif // CONFIG_VTI
+    INST64_M6  M6;     // ldfd floating pointer
     INST64_M28 M28;    // purge translation cache entry
     INST64_M29 M29;    // mov register to ar (M unit)
     INST64_M30 M30;    // mov immediate to ar (M unit)
@@ -204,9 +199,7 @@
     INST64_M44 M44;    // set/reset system mask
     INST64_M45 M45;    // translation purge
     INST64_M46 M46;    // translation access (tpa,tak)
-#ifdef CONFIG_VTI
     INST64_M47 M47;    // purge translation entry
-#endif // CONFIG_VTI
 } INST64;
 
 #define MASK_41 ((UINT64)0x1ffffffffff)
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/include/asm-ia64/regionreg.h
--- a/xen/include/asm-ia64/regionreg.h  Sat Aug 20 04:45:43 2005
+++ b/xen/include/asm-ia64/regionreg.h  Sat Aug 20 05:19:39 2005
@@ -1,12 +1,6 @@
 #ifndef                _REGIONREG_H_
 #define                _REGIONREG_H_
-#ifdef  CONFIG_VTI
-#define XEN_DEFAULT_RID     0xf00000
-#define DOMAIN_RID_SHIFT    20
-#define DOMAIN_RID_MASK     (~(1U<<DOMAIN_RID_SHIFT -1))
-#else //CONFIG_VTI
 #define XEN_DEFAULT_RID                7
-#endif // CONFIG_VTI
 #define        IA64_MIN_IMPL_RID_MSB   17
 #define _REGION_ID(x)   ({ia64_rr _v; _v.rrval = (long) (x); _v.rid;})
 #define _REGION_PAGE_SIZE(x)    ({ia64_rr _v; _v.rrval = (long) (x); _v.ps;})
@@ -42,4 +36,32 @@
 
 int set_one_rr(unsigned long rr, unsigned long val);
 
+// This function is purely for performance... apparently scrambling
+//  bits in the region id makes for better hashing, which means better
+//  use of the VHPT, which means better performance
+// Note that the only time a RID should be mangled is when it is stored in
+//  a region register; anytime it is "viewable" outside of this module,
+//  it should be unmangled
+
+// NOTE: this function is also implemented in assembly code in hyper_set_rr!!
+// Must ensure these two remain consistent!
+static inline unsigned long
+vmMangleRID(unsigned long RIDVal)
+{
+       union bits64 { unsigned char bytes[4]; unsigned long uint; };
+
+       union bits64 t;
+       unsigned char tmp;
+
+       t.uint = RIDVal;
+       tmp = t.bytes[1];
+       t.bytes[1] = t.bytes[3];
+       t.bytes[3] = tmp;
+
+       return t.uint;
+}
+
+// since vmMangleRID is symmetric, use it for unmangling also
+#define vmUnmangleRID(x)       vmMangleRID(x)
+
 #endif         /* !_REGIONREG_H_ */
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/include/asm-ia64/tlb.h
--- a/xen/include/asm-ia64/tlb.h        Sat Aug 20 04:45:43 2005
+++ b/xen/include/asm-ia64/tlb.h        Sat Aug 20 05:19:39 2005
@@ -35,17 +35,4 @@
     unsigned long rid;
 } TR_ENTRY;
 
-#ifdef CONFIG_VTI
-typedef union {
-        unsigned long   value;
-        struct {
-                unsigned long ve : 1;
-                unsigned long rv1 : 1;
-                unsigned long ps  : 6;
-                unsigned long rid : 24;
-                unsigned long rv2 : 32;
-        };
-} rr_t;
-#endif // CONFIG_VTI
-
 #endif
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h       Sat Aug 20 04:45:43 2005
+++ b/xen/include/asm-ia64/vmmu.h       Sat Aug 20 05:19:39 2005
@@ -23,10 +23,11 @@
 #ifndef XEN_TLBthash_H
 #define XEN_TLBthash_H
 
-#include "xen/config.h"
-#include "xen/types.h"
-#include "public/xen.h"
-#include "asm/tlb.h"
+#include <xen/config.h>
+#include <xen/types.h>
+#include <public/xen.h>
+#include <asm/tlb.h>
+#include <asm/regionreg.h>
 
 //#define         THASH_TLB_TR            0
 //#define         THASH_TLB_TC            1
@@ -152,7 +153,7 @@
 typedef u64 *(GET_MFN_FN)(domid_t d, u64 gpfn, u64 pages);
 typedef void *(REM_NOTIFIER_FN)(struct hash_cb *hcb, thash_data_t *entry);
 typedef void (RECYCLE_FN)(struct hash_cb *hc, u64 para);
-typedef rr_t (GET_RR_FN)(struct vcpu *vcpu, u64 reg);
+typedef ia64_rr (GET_RR_FN)(struct vcpu *vcpu, u64 reg);
 typedef thash_data_t *(FIND_OVERLAP_FN)(struct thash_cb *hcb, 
         u64 va, u64 ps, int rid, char cl, search_section_t s_sect);
 typedef thash_data_t *(FIND_NEXT_OVL_FN)(struct thash_cb *hcb);
@@ -329,7 +330,7 @@
 extern u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps);
 extern void purge_machine_tc_by_domid(domid_t domid);
 extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
-extern rr_t vmmu_get_rr(struct vcpu *vcpu, u64 va);
+extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va);
 extern thash_cb_t *init_domain_tlb(struct vcpu *d);
 
 #define   VTLB_DEBUG
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h        Sat Aug 20 04:45:43 2005
+++ b/xen/include/asm-ia64/vmx.h        Sat Aug 20 05:19:39 2005
@@ -32,9 +32,12 @@
 extern void vmx_init_double_mapping_stub(void);
 extern void vmx_save_state(struct vcpu *v);
 extern void vmx_load_state(struct vcpu *v);
+extern void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c);
+#ifdef XEN_DBL_MAPPING
 extern vmx_insert_double_mapping(u64,u64,u64,u64,u64);
 extern void vmx_purge_double_mapping(u64, u64, u64);
 extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7);
+#endif
 
 extern void vmx_wait_io(void);
 extern void vmx_io_assist(struct vcpu *v);
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Sat Aug 20 04:45:43 2005
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Sat Aug 20 05:19:39 2005
@@ -308,7 +308,9 @@
     
     vtm=&(vcpu->arch.arch_vmx.vtm);
     VPD_CR(vcpu,itm)=val;
+#ifdef CONFIG_VTI
     vtm_interruption_update(vcpu, vtm);
+#endif
     return IA64_NO_FAULT;
 }
 static inline
@@ -414,7 +416,9 @@
 IA64FAULT
 vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
 {
+#ifdef CONFIG_VTI
     guest_write_eoi(vcpu);
+#endif
     return IA64_NO_FAULT;
 }
 
@@ -424,7 +428,9 @@
 {
 
     VPD_CR(vcpu,itv)=val;
+#ifdef CONFIG_VTI
     vtm_set_itv(vcpu);
+#endif
     return IA64_NO_FAULT;
 }
 static inline
@@ -465,13 +471,17 @@
 static inline
 IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val)
 {
+#ifdef CONFIG_VTI
     vtm_set_itc(vcpu, val);
+#endif
     return  IA64_NO_FAULT;
 }
 static inline
 IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu,UINT64 *val)
 {
+#ifdef CONFIG_VTI
     *val = vtm_get_itc(vcpu);
+#endif
     return  IA64_NO_FAULT;
 }
 static inline
@@ -584,15 +594,22 @@
     return (IA64_NO_FAULT);
 }
 
+/* Another hash performance algorithm */
 #define redistribute_rid(rid)  (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
 static inline unsigned long
-vmx_vrrtomrr(VCPU *vcpu,unsigned long val)
+vmx_vrrtomrr(VCPU *v, unsigned long val)
 {
     ia64_rr rr;
     u64          rid;
+
     rr.rrval=val;
+    rr.rid = vmMangleRID(v->arch.starting_rid  + rr.rid);
+/* Disable this rid allocation algorithm for now */
+#if 0
     rid=(((u64)vcpu->domain->domain_id)<<DOMAIN_RID_SHIFT) + rr.rid;
     rr.rid = redistribute_rid(rid);
+#endif 
+
     rr.ve=1;
     return rr.rrval;
 }
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h    Sat Aug 20 04:45:43 2005
+++ b/xen/include/asm-ia64/vmx_vpd.h    Sat Aug 20 05:19:39 2005
@@ -61,12 +61,6 @@
        unsigned long   lrr1;
        unsigned long   rsv6[46];
 } cr_t;
-
-void vmx_enter_scheduler(void);
-
-//FIXME: Map for LID to vcpu, Eddie
-#define        MAX_NUM_LPS             (1UL<<16)
-extern struct vcpu     *lid_edt[MAX_NUM_LPS];
 
 struct arch_vmx_struct {
 //    struct virutal_platform_def     vmx_platform;
diff -r 97675c2dbb40 -r 1ec2225aa8c6 xen/include/asm-ia64/xenprocessor.h
--- a/xen/include/asm-ia64/xenprocessor.h       Sat Aug 20 04:45:43 2005
+++ b/xen/include/asm-ia64/xenprocessor.h       Sat Aug 20 05:19:39 2005
@@ -50,16 +50,11 @@
        __u64 ri : 2;
        __u64 ed : 1;
        __u64 bn : 1;
-#ifdef CONFIG_VTI
        __u64 ia : 1;
        __u64 vm : 1;
        __u64 reserved5 : 17;
-#else // CONFIG_VTI
-       __u64 reserved4 : 19;
-#endif // CONFIG_VTI
 };
 
-#ifdef  CONFIG_VTI
 /* vmx like above but expressed as bitfields for more efficient access: */
 typedef  union{
     __u64 val;
@@ -218,6 +213,4 @@
         ret;                            \
 })
 
-#endif  //  CONFIG_VTI
-
 #endif // _ASM_IA64_XENPROCESSOR_H
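
One footnote on the RID policy above: vmMangleRID, now exposed from
regionreg.h, is a plain byte swap and therefore its own inverse, which is
why vmUnmangleRID simply aliases it. A self-contained sketch follows; the
function body is copied from the patch, while the sample RID value and
main() are arbitrary.

#include <stdio.h>

/* Copied from the patch: swap bytes 1 and 3 of the RID so consecutive
 * RIDs scatter across VHPT hash buckets.  On a little-endian host this
 * exchanges bits 8-15 with bits 24-31 of the value. */
static unsigned long vmMangleRID(unsigned long RIDVal)
{
        union bits64 { unsigned char bytes[4]; unsigned long uint; };

        union bits64 t;
        unsigned char tmp;

        t.uint = RIDVal;
        tmp = t.bytes[1];
        t.bytes[1] = t.bytes[3];
        t.bytes[3] = tmp;

        return t.uint;
}

int main(void)
{
        unsigned long rid = 0x123456UL;        /* arbitrary sample RID */
        unsigned long mangled = vmMangleRID(rid);

        /* Applying the swap twice restores the original value, so the
         * same routine serves as vmUnmangleRID. */
        printf("rid=%#lx mangled=%#lx restored=%#lx\n",
               rid, mangled, vmMangleRID(mangled));
        return 0;
}

Scrambling the RID this way spreads nearby RIDs across VHPT hash buckets
without losing information, since the swap is lossless and symmetric.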
