
[Xen-changelog] Remove all CONFIG_VTI, VTI now works dynamically



# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID babbdd896024460e365332c0656706283a26f75a
# Parent  d2f2c1c26995d909f26c2ce821beff16294e74ad
Remove all CONFIG_VTI, VTI now works dynamically
1. Remove vcpu_set_regs and the regs element, which are never used.
2. Remove ia64_prepare_handle_privop, ia64_prepare_handle_break and
   ia64_prepare_handle_reflection, which are never used.
3. Modify related macros to adapt to the three-level physical-to-machine table.
4. Remove all CONFIG_VTI.
5. Merge ia64_switch_to.

Signed-off-by: Anthony Xu <Anthony.xu@xxxxxxxxx>
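
The heart of this change is that the old build-time choice (#ifdef CONFIG_VTI)
becomes a per-vcpu runtime check, as in the schedule_tail()/context_switch()
hunks below. The following is a minimal standalone sketch of that pattern,
illustration only and not part of the changeset; the struct, flag bit and
function names are simplified stand-ins, not the real Xen ia64 definitions.

#include <stdio.h>

/* Stand-in for the per-vcpu "is a VTI domain" flag bit; the real flag and
 * the VMX_DOMAIN() test live in the Xen ia64 headers, not here. */
#define TOY_VTI_BIT 0

struct toy_vcpu {
    unsigned long vmx_flags;            /* models the per-vcpu VMX flags */
};

#define VMX_DOMAIN(v) ((v)->vmx_flags & (1UL << TOY_VTI_BIT))

static void load_region_regs_like(struct toy_vcpu *v) { puts("paravirtual path"); }
static void vmx_load_all_rr_like(struct toy_vcpu *v)  { puts("VTI path"); }

/* Before: one of the two branches was selected by #ifdef CONFIG_VTI at
 * build time.  After: both paths are always compiled in and the guest
 * type is checked per vcpu when the scheduler switches to it. */
static void schedule_tail_like(struct toy_vcpu *v)
{
    if (VMX_DOMAIN(v))
        vmx_load_all_rr_like(v);
    else
        load_region_regs_like(v);
}

int main(void)
{
    struct toy_vcpu pv  = { .vmx_flags = 0 };
    struct toy_vcpu vti = { .vmx_flags = 1UL << TOY_VTI_BIT };
    schedule_tail_like(&pv);    /* prints "paravirtual path" */
    schedule_tail_like(&vti);   /* prints "VTI path" */
    return 0;
}

With both paths compiled into one image, a single xen binary can host
paravirtualized and VTI domains side by side, which is what "VTI now works
dynamically" refers to.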

diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/Makefile
--- a/xen/arch/ia64/Makefile    Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/Makefile    Thu Sep 22 12:59:57 2005
@@ -12,15 +12,10 @@
        irq_ia64.o irq_lsapic.o vhpt.o xenasm.o hyperprivop.o dom_fw.o \
        grant_table.o sn_console.o
 
-# TMP holder to contain *.0 moved out of CONFIG_VTI
-OBJS += vmx_init.o
-
-ifeq ($(CONFIG_VTI),y)
-OBJS += vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\
+OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\
        vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
        vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o vmx_support.o \
        pal_emul.o vmx_irq_ia64.o
-endif
 
 # lib files from xen/arch/ia64/linux/ (linux/arch/ia64/lib)
 OBJS +=        bitop.o clear_page.o flush.o copy_page_mck.o                    \
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/Rules.mk
--- a/xen/arch/ia64/Rules.mk    Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/Rules.mk    Thu Sep 22 12:59:57 2005
@@ -1,7 +1,7 @@
 ########################################
 # ia64-specific definitions
 
-CONFIG_VTI     ?= n
+VALIDATE_VT    ?= n
 ifneq ($(COMPILE_ARCH),$(TARGET_ARCH))
 CROSS_COMPILE ?= /usr/local/sp_env/v2.2.5/i686/bin/ia64-unknown-linux-
 endif
@@ -27,7 +27,7 @@
 CFLAGS  += -DIA64 -DXEN -DLINUX_2_6
 CFLAGS += -ffixed-r13 -mfixed-range=f12-f15,f32-f127
 CFLAGS += -w -g
-ifeq ($(CONFIG_VTI),y)
-CFLAGS  += -DCONFIG_VTI
+ifeq ($(VALIDATE_VT),y)
+CFLAGS  += -DVALIDATE_VT
 endif
 LDFLAGS := -g
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c       Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/asm-offsets.c       Thu Sep 22 12:59:57 2005
@@ -9,10 +9,8 @@
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <public/xen.h>
-#ifdef CONFIG_VTI
 #include <asm/tlb.h>
 #include <asm/regs.h>
-#endif // CONFIG_VTI
 
 #define task_struct vcpu
 
@@ -222,14 +220,12 @@
 
        BLANK();
 
-#ifdef  CONFIG_VTI
        DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.privregs));
        DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0]));
        DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
        DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
 
        BLANK();
-#endif  //CONFIG_VTI
        //DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip));
        //DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp));
        //DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr));
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/asm-xsi-offsets.c
--- a/xen/arch/ia64/asm-xsi-offsets.c   Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/asm-xsi-offsets.c   Thu Sep 22 12:59:57 2005
@@ -32,10 +32,8 @@
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <public/xen.h>
-#ifdef CONFIG_VTI
 #include <asm/tlb.h>
 #include <asm/regs.h>
-#endif // CONFIG_VTI
 
 #define task_struct vcpu
 
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/linux-xen/entry.S
--- a/xen/arch/ia64/linux-xen/entry.S   Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/linux-xen/entry.S   Thu Sep 22 12:59:57 2005
@@ -223,9 +223,20 @@
 #else
        mov IA64_KR(CURRENT)=in0        // update "current" application register
 #endif
+#ifdef XEN          //for VTI domain current is save to 21 of bank0
+    ;;
+    bsw.0
+    ;;
        mov r8=r13                      // return pointer to previously running task
        mov r13=in0                     // set "current" pointer
-       ;;
+    mov r21=in0
+    ;;
+    bsw.1
+       ;;
+#else
+    mov r8=r13          // return pointer to previously running task
+    mov r13=in0         // set "current" pointer
+#endif
        DO_LOAD_SWITCH_STACK
 
 #ifdef CONFIG_SMP
@@ -632,12 +643,14 @@
 #ifdef XEN
        // new domains are cloned but not exec'ed so switch to user mode here
        cmp.ne pKStk,pUStk=r0,r0
-#ifdef CONFIG_VTI
-    br.cond.spnt ia64_leave_hypervisor
-#else // CONFIG_VTI
-    br.cond.spnt ia64_leave_kernel
-#endif // CONFIG_VTI
-
+    adds r16 = IA64_VCPU_FLAGS_OFFSET, r13
+    ;;
+    ld8 r16 = [r16]
+    ;;
+    cmp.ne p6,p7 = r16, r0
+ (p6) br.cond.spnt ia64_leave_hypervisor
+ (p7) br.cond.spnt ia64_leave_kernel
+    ;;
 //    adds r16 = IA64_VCPU_FLAGS_OFFSET, r13
 //    ;;
 //    ld8 r16 = [r16]
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/linux-xen/head.S
--- a/xen/arch/ia64/linux-xen/head.S    Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/linux-xen/head.S    Thu Sep 22 12:59:57 2005
@@ -259,7 +259,7 @@
        /*
         * Switch into virtual mode:
         */
-#if defined(XEN) && defined(CONFIG_VTI)
+#if defined(XEN) && defined(VALIDATE_VT)
        movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH\
                  |IA64_PSR_DI)
 #else
@@ -284,7 +284,7 @@
        ;;
 
        // set IVT entry point---can't access I/O ports without it
-#if defined(XEN) && defined(CONFIG_VTI)
+#if defined(XEN) && defined(VALIDATE_VT)
        movl r3=vmx_ia64_ivt
 #else
        movl r3=ia64_ivt
@@ -356,7 +356,7 @@
 
 .load_current:
        // load the "current" pointer (r13) and ar.k6 with the current task
-#if defined(XEN) && defined(CONFIG_VTI)
+#if defined(XEN) && defined(VALIDATE_VT)
        mov r21=r2
        ;;
        bsw.1
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/linux-xen/unaligned.c
--- a/xen/arch/ia64/linux-xen/unaligned.c       Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/linux-xen/unaligned.c       Thu Sep 22 12:59:57 2005
@@ -201,12 +201,11 @@
 
        RPT(r1), RPT(r2), RPT(r3),
 
-//#if defined(XEN) && defined(CONFIG_VTI)
 #if defined(XEN)
        RPT(r4), RPT(r5), RPT(r6), RPT(r7),
-#else   //CONFIG_VTI
+#else
        RSW(r4), RSW(r5), RSW(r6), RSW(r7),
-#endif  //CONFIG_VTI
+#endif
 
        RPT(r8), RPT(r9), RPT(r10), RPT(r11),
        RPT(r12), RPT(r13), RPT(r14), RPT(r15),
@@ -296,7 +295,6 @@
        return reg;
 }
 
-//#if defined(XEN) && defined(CONFIG_VTI)
 #if defined(XEN)
 void
 set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
@@ -414,7 +412,7 @@
     }
 }
 
-#else // CONFIG_VTI
+#else
 static void
 set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
 {
@@ -559,7 +557,7 @@
                *nat = 0;
        return;
 }
-#endif // CONFIG_VTI
+#endif
 
 
 #ifdef XEN
@@ -595,11 +593,11 @@
                unat = &sw->ar_unat;
        } else {
                addr = (unsigned long)regs;
-#if defined(XEN) && defined(CONFIG_VTI)
+#if defined(XEN)
                unat = &regs->eml_unat;
-#else //CONFIG_VTI
+#else
                unat = &sw->caller_unat;
-#endif  //CONFIG_VTI
+#endif
        }
        DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
               addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum));
@@ -785,11 +783,11 @@
                unat = &sw->ar_unat;
        } else {
                addr = (unsigned long)regs;
-#if defined(XEN) && defined(CONFIG_VTI)
+#if defined(XEN)
                unat = &regs->eml_unat;;
-#else   //CONFIG_VTI
+#else
                unat = &sw->caller_unat;
-#endif  //CONFIG_VTI
+#endif
        }
 
        DPRINT("addr_base=%lx offset=0x%x\n", addr,  GR_OFFS(regnum));
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/vmx/mm.c
--- a/xen/arch/ia64/vmx/mm.c    Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/vmx/mm.c    Thu Sep 22 12:59:57 2005
@@ -100,8 +100,7 @@
         uregs->ptr is virtual address
         uregs->val is pte value
  */
-#ifdef CONFIG_VTI
-int do_mmu_update(mmu_update_t *ureqs,u64 count,u64 *pdone,u64 foreigndom)
+int vmx_do_mmu_update(mmu_update_t *ureqs,u64 count,u64 *pdone,u64 foreigndom)
 {
     int i,cmd;
     u64 mfn, gpfn;
@@ -149,4 +148,3 @@
     }
     return 0;
 }
-#endif
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/vmx/vmmu.c  Thu Sep 22 12:59:57 2005
@@ -220,6 +220,7 @@
  * by control panel. Dom0 has gpfn identical to mfn, which doesn't need
  * this interface at all.
  */
+#if 0
 void
 alloc_pmt(struct domain *d)
 {
@@ -234,7 +235,7 @@
     d->arch.pmt = page_to_virt(page);
     memset(d->arch.pmt, 0x55, d->max_pages * 8);
 }
-
+#endif
 /*
  * Insert guest TLB to machine TLB.
  *  data:   In TLB format
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Thu Sep 22 12:59:57 2005
@@ -47,11 +47,13 @@
     vcpu_get_gr_nat(vcpu,17,&r33);
     vcpu_get_gr_nat(vcpu,18,&r34);
     vcpu_get_gr_nat(vcpu,19,&r35);
-    ret=do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
-    vcpu_set_gr(vcpu, 8, ret, 0);
-    vmx_vcpu_increment_iip(vcpu);
-}
-
+    ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
+    vcpu_set_gr(vcpu, 8, ret, 0);
+    vmx_vcpu_increment_iip(vcpu);
+}
+/* turn off temporarily, we will merge hypercall parameter convention with xeno, when
+    VTI domain need to call hypercall */
+#if 0
 unsigned long __hypercall_create_continuation(
     unsigned int op, unsigned int nr_args, ...)
 {
@@ -87,7 +89,7 @@
     va_end(args);
     return op;
 }
-
+#endif
 void hyper_dom_mem_op(void)
 {
     VCPU *vcpu=current;
@@ -184,14 +186,13 @@
 
 static int do_set_shared_page(VCPU *vcpu, u64 gpa)
 {
-    u64 shared_info, o_info;
+    u64 o_info;
     struct domain *d = vcpu->domain;
     struct vcpu *v;
     if(vcpu->domain!=dom0)
         return -EPERM;
-    shared_info = __gpa_to_mpa(vcpu->domain, gpa);
     o_info = (u64)vcpu->domain->shared_info;
-    d->shared_info= (shared_info_t *)__va(shared_info);
+    d->shared_info= (shared_info_t *)domain_mpa_to_imva(vcpu->domain, gpa);
 
     /* Copy existing shared info into new page */
     if (o_info) {
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/vmx/vmx_init.c      Thu Sep 22 12:59:57 2005
@@ -163,7 +163,8 @@
        }
 
        /* FIXME: only support PMT table continuously by far */
-       d->arch.pmt = __va(c->pt_base);
+//     d->arch.pmt = __va(c->pt_base);
+
 
        vmx_final_setup_domain(d);
 }
@@ -209,7 +210,6 @@
 }
 
 
-#ifdef CONFIG_VTI
 /*
  * Create a VP on intialized VMX environment.
  */
@@ -333,7 +333,6 @@
                                  pte_xen, pte_vhpt);
 }
 #endif // XEN_DBL_MAPPING
-#endif // CONFIG_VTI
 
 /*
  * Initialize VMX envirenment for guest. Only the 1st vp/vcpu
@@ -355,7 +354,11 @@
     v->arch.privregs = vpd;
        vpd->virt_env_vaddr = vm_buffer;
 
-#ifdef CONFIG_VTI
+       /* Per-domain vTLB and vhpt implementation. Now vmx domain will stick
+        * to this solution. Maybe it can be deferred until we know created
+        * one as vmx domain */
+       v->arch.vtlb = init_domain_tlb(v);
+
        /* v->arch.schedule_tail = arch_vmx_do_launch; */
        vmx_create_vp(v);
 
@@ -369,7 +372,6 @@
 
        vlsapic_reset(v);
        vtm_init(v);
-#endif
 
        /* Other vmx specific initialization work */
 }
@@ -483,7 +485,7 @@
            for (j = io_ranges[i].start;
                 j < io_ranges[i].start + io_ranges[i].size;
                 j += PAGE_SIZE)
-               map_domain_io_page(d, j);
+               map_domain_page(d, j, io_ranges[i].type);
        }
 
        set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/vmx/vmx_irq_ia64.c
--- a/xen/arch/ia64/vmx/vmx_irq_ia64.c  Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c  Thu Sep 22 12:59:57 2005
@@ -36,7 +36,6 @@
 
 #define IRQ_DEBUG      0
 
-#ifdef  CONFIG_VTI
 #define vmx_irq_enter()                \
        add_preempt_count(HARDIRQ_OFFSET);
 
@@ -130,4 +129,3 @@
        if ( wake_dom0 && current != dom0 ) 
                vcpu_wake(dom0->vcpu[0]);
 }
-#endif
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/vmx/vmx_process.c   Thu Sep 22 12:59:57 2005
@@ -314,11 +314,12 @@
 //    prepare_if_physical_mode(v);
 
     if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
-        if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain, data->ppn>>(PAGE_SHIFT-12))){
+        if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,data->ppn>>(PAGE_SHIFT-12))){
             vadr=(vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
             emulate_io_inst(v, vadr, data->ma);
             return IA64_FAULT;
         }
+
        if ( data->ps != vrr.ps ) {
                machine_tlb_insert(v, data);
        }
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/xen/domain.c        Thu Sep 22 12:59:57 2005
@@ -7,7 +7,7 @@
  *  Copyright (C) 2005 Intel Co
  *     Kun Tian (Kevin Tian) <kevin.tian@xxxxxxxxx>
  *
- * 05/04/29 Kun Tian (Kevin Tian) <kevin.tian@xxxxxxxxx> Add CONFIG_VTI domain support
+ * 05/04/29 Kun Tian (Kevin Tian) <kevin.tian@xxxxxxxxx> Add VTI domain support
  */
 
 #include <xen/config.h>
@@ -203,13 +203,6 @@
        v->vcpu_info = &(d->shared_info->vcpu_data[0]);
 
        d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
-
-#ifdef CONFIG_VTI
-       /* Per-domain vTLB and vhpt implementation. Now vmx domain will stick
-        * to this solution. Maybe it can be deferred until we know created
-        * one as vmx domain */
-       v->arch.vtlb = init_domain_tlb(v);
-#endif
 
        /* We may also need emulation rid for region4, though it's unlikely
         * to see guest issue uncacheable access in metaphysical mode. But
@@ -361,7 +354,6 @@
        regs->ar_fpsr = FPSR_DEFAULT;
 
        if (VMX_DOMAIN(v)) {
-#ifdef CONFIG_VTI
                vmx_init_all_rr(v);
                if (d == dom0)
 //                 VCPU(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
@@ -369,7 +361,6 @@
                /* Virtual processor context setup */
                VCPU(v, vpsr) = IA64_PSR_BN;
                VCPU(v, dcr) = 0;
-#endif
        } else {
                init_all_rr(v);
                if (d == dom0) 
@@ -480,7 +471,7 @@
        }
        else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
 }
-
+#if 0
 /* map a physical address with specified I/O flag */
 void map_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
 {
@@ -517,7 +508,7 @@
        }
        else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
 }
-
+#endif
 void mpafoo(unsigned long mpaddr)
 {
        extern unsigned long privop_trace;
@@ -571,7 +562,7 @@
 }
 
 // FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
-#ifndef CONFIG_VTI
+#if 1
 unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
 {
        unsigned long pte = lookup_domain_mpa(d,mpaddr);
@@ -582,14 +573,14 @@
        imva |= mpaddr & ~PAGE_MASK;
        return(imva);
 }
-#else // CONFIG_VTI
+#else
 unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
 {
     unsigned long imva = __gpa_to_mpa(d, mpaddr);
 
     return __va(imva);
 }
-#endif // CONFIG_VTI
+#endif
 
 // remove following line if not privifying in memory
 //#define HAVE_PRIVIFY_MEMORY
@@ -860,7 +851,7 @@
        if ( rc != 0 )
            return rc;
 
-#ifdef CONFIG_VTI
+#ifdef VALIDATE_VT
        /* Temp workaround */
        if (running_on_sim)
            dsi.xen_section_string = (char *)1;
@@ -920,7 +911,7 @@
        for ( i = 1; i < MAX_VIRT_CPUS; i++ )
            d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
 
-#ifdef CONFIG_VTI
+#ifdef VALIDATE_VT 
        /* Construct a frame-allocation list for the initial domain, since these
         * pages are allocated by boot allocator and pfns are not set properly
         */
@@ -938,10 +929,6 @@
            machine_to_phys_mapping[mfn] = mfn;
        }
 
-       /* Dom0's pfn is equal to mfn, so there's no need to allocate pmt
-        * for dom0
-        */
-       d->arch.pmt = NULL;
 #endif
 
        /* Copy the OS image. */
@@ -1162,12 +1149,8 @@
 void sync_vcpu_execstate(struct vcpu *v)
 {
        ia64_save_fpu(v->arch._thread.fph);
-#ifdef CONFIG_VTI
        if (VMX_DOMAIN(v))
                vmx_save_state(v);
-#else
-       if (0) do {} while(0);
-#endif
        else {
                if (IA64_HAS_EXTRA_STATE(v))
                        ia64_save_extra(v);
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/xen/grant_table.c
--- a/xen/arch/ia64/xen/grant_table.c   Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/xen/grant_table.c   Thu Sep 22 12:59:57 2005
@@ -1,4 +1,3 @@
-#ifndef CONFIG_VTI
 // temporarily in arch/ia64 until can merge into common/grant_table.c
 /******************************************************************************
  * common/grant_table.c
@@ -1452,7 +1451,6 @@
 {
     /* Nothing. */
 }
-#endif
 
 /*
  * Local variables:
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c     Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/xen/hypercall.c     Thu Sep 22 12:59:57 2005
@@ -178,11 +178,9 @@
                regs->r8 = do_event_channel_op(regs->r14);
                break;
 
-#ifndef CONFIG_VTI
            case __HYPERVISOR_grant_table_op:
                regs->r8 = do_grant_table_op(regs->r14, regs->r15, regs->r16);
                break;
-#endif
 
            case __HYPERVISOR_console_io:
                regs->r8 = do_console_io(regs->r14, regs->r15, regs->r16);
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/xen/privop.c
--- a/xen/arch/ia64/xen/privop.c        Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/xen/privop.c        Thu Sep 22 12:59:57 2005
@@ -726,7 +726,6 @@
                return IA64_ILLOP_FAULT;
        }
        //if (isrcode != 1 && isrcode != 2) return 0;
-       vcpu_set_regs(vcpu,regs);
        privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
        // its OK for a privified-cover to be executed in user-land
        fault = priv_handle_op(vcpu,regs,privlvl);
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c       Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/xen/process.c       Thu Sep 22 12:59:57 2005
@@ -67,14 +67,14 @@
        unsigned long rr7;
        //printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
        //printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
-#ifdef CONFIG_VTI
        /* rr7 will be postponed to last point when resuming back to guest */
-       vmx_load_all_rr(current);
-#else // CONFIG_VTI
-       if (rr7 = load_region_regs(current)) {
-               printk("schedule_tail: change to rr7 not yet implemented\n");
-       }
-#endif // CONFIG_VTI
+    if(VMX_DOMAIN(current)){
+       vmx_load_all_rr(current);
+    }else{
+           if (rr7 = load_region_regs(current)) {
+                   printk("schedule_tail: change to rr7 not yet implemented\n");
+       }
+    }
 }
 
 void tdpfoo(void) { }
@@ -755,7 +755,7 @@
 {
     struct mc_state *mcs = &mc_state[smp_processor_id()];
     VCPU *vcpu = current;
-    struct cpu_user_regs *regs = vcpu->arch.regs;
+    struct cpu_user_regs *regs = vcpu_regs(vcpu);
     unsigned int i;
     va_list args;
 
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c     Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/xen/regionreg.c     Thu Sep 22 12:59:57 2005
@@ -227,7 +227,7 @@
                return 0;
        }
 
-#ifdef CONFIG_VTI
+#if 0
        memrrv.rrval = rrv.rrval;
        if (rreg == 7) {
                newrrv.rid = newrid;
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/xen/vcpu.c  Thu Sep 22 12:59:57 2005
@@ -1977,7 +1977,3 @@
        return (IA64_ILLOP_FAULT);
 }
 
-void vcpu_set_regs(VCPU *vcpu, REGS *regs)
-{
-       vcpu->arch.regs = regs;
-}
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/xen/xenmem.c
--- a/xen/arch/ia64/xen/xenmem.c        Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/xen/xenmem.c        Thu Sep 22 12:59:57 2005
@@ -28,17 +28,13 @@
 /*
  * Set up the page tables.
  */
-#ifdef CONFIG_VTI
 unsigned long *mpt_table;
 unsigned long mpt_table_size;
-#endif // CONFIG_VTI
 
 void
 paging_init (void)
 {
        struct pfn_info *pg;
-
-#ifdef CONFIG_VTI
        unsigned int mpt_order;
        /* Create machine to physical mapping table
         * NOTE: similar to frame table, later we may need virtually
@@ -53,8 +49,6 @@
 
        printk("machine to physical table: 0x%lx\n", (u64)mpt_table);
        memset(mpt_table, INVALID_M2P_ENTRY, mpt_table_size);
-#endif // CONFIG_VTI
-
        /* Other mapping setup */
 
        zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
diff -r d2f2c1c26995 -r babbdd896024 xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c       Wed Sep 21 21:13:16 2005
+++ b/xen/arch/ia64/xen/xenmisc.c       Thu Sep 22 12:59:57 2005
@@ -65,7 +65,7 @@
 
 void sync_lazy_execstate_cpu(unsigned int cpu) {}
 
-#ifdef CONFIG_VTI
+#if 0
 int grant_table_create(struct domain *d) { return 0; }
 void grant_table_destroy(struct domain *d) { return; }
 #endif
@@ -77,7 +77,6 @@
        raise_softirq(AC_TIMER_SOFTIRQ);
 }
 
-#ifndef CONFIG_VTI
 unsigned long
 __gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
 {
@@ -93,7 +92,7 @@
                return ((pte & _PFN_MASK) >> PAGE_SHIFT);
        }
 }
-
+#if 0
 u32
 __mfn_to_gpfn(struct domain *d, unsigned long frame)
 {
@@ -288,14 +287,14 @@
 //if (prev->domain->domain_id == 1 && next->domain->domain_id == 0) cs10foo();
 //if (prev->domain->domain_id == 0 && next->domain->domain_id == 1) cs01foo();
 //printk("@@sw %d->%d\n",prev->domain->domain_id,next->domain->domain_id);
-#ifdef CONFIG_VTI
-       vtm_domain_out(prev);
-#endif
+    if(VMX_DOMAIN(prev)){
+       vtm_domain_out(prev);
+    }
        context_switch_count++;
        switch_to(prev,next,prev);
-#ifdef CONFIG_VTI
-        vtm_domain_in(current);
-#endif
+    if(VMX_DOMAIN(current)){
+        vtm_domain_in(current);
+    }
 
 // leave this debug for now: it acts as a heartbeat when more than
 // one domain is active
@@ -307,16 +306,15 @@
 if (!i--) { printk("+",id); i = 1000000; }
 }
 
-#ifdef CONFIG_VTI
-       if (VMX_DOMAIN(current))
+       if (VMX_DOMAIN(current)){
                vmx_load_all_rr(current);
-#else
-       if (!is_idle_task(current->domain)) {
-               load_region_regs(current);
-               if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
-       }
-       if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
-#endif
+    }else{
+       if (!is_idle_task(current->domain)) {
+               load_region_regs(current);
+                   if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
+       }
+           if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
+    }
 }
 
 void context_switch_finalise(struct vcpu *next)
diff -r d2f2c1c26995 -r babbdd896024 xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h     Wed Sep 21 21:13:16 2005
+++ b/xen/include/asm-ia64/config.h     Thu Sep 22 12:59:57 2005
@@ -199,11 +199,11 @@
      access_ok(type,addr,count*size))
 
 // see drivers/char/console.c
-#ifndef CONFIG_VTI
+#ifndef VALIDATE_VT
 #define        OPT_CONSOLE_STR "com1"
-#else // CONFIG_VTI
+#else
 #define        OPT_CONSOLE_STR "com2"
-#endif // CONFIG_VTI
+#endif
 
 #define __attribute_used__     __attribute__ ((unused))
 #define __nocast
diff -r d2f2c1c26995 -r babbdd896024 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Wed Sep 21 21:13:16 2005
+++ b/xen/include/asm-ia64/domain.h     Thu Sep 22 12:59:57 2005
@@ -25,7 +25,6 @@
     int breakimm;
 
     int imp_va_msb;
-    unsigned long *pmt;        /* physical to machine table */
     /* System pages out of guest memory, like for xenstore/console */
     unsigned long sys_pgnr;
     unsigned long max_pfn; /* Max pfn including I/O holes */
@@ -62,7 +61,6 @@
        unsigned long xen_itm;
        unsigned long xen_timer_interval;
 #endif
-    void *regs;        /* temporary until find a better way to do privops */
     mapped_regs_t *privregs; /* save the state of vcpu */
     int metaphysical_rr0;              // from arch_domain (so is pinned)
     int metaphysical_rr4;              // from arch_domain (so is pinned)
diff -r d2f2c1c26995 -r babbdd896024 xen/include/asm-ia64/ia64_int.h
--- a/xen/include/asm-ia64/ia64_int.h   Wed Sep 21 21:13:16 2005
+++ b/xen/include/asm-ia64/ia64_int.h   Thu Sep 22 12:59:57 2005
@@ -45,7 +45,7 @@
 #define        IA64_DISIST_FAULT       (IA64_GENEX_VECTOR | 0x40)
 #define        IA64_ILLDEP_FAULT       (IA64_GENEX_VECTOR | 0x80)
 #define        IA64_DTLB_FAULT         (IA64_DATA_TLB_VECTOR)
-#define IA64_VHPT_FAULT     (IA64_VHPT_TRANS_VECTOR | 0x10)
+#define IA64_VHPT_FAULT     (IA64_VHPT_TRANS_VECTOR | 0x7)
 #if !defined(__ASSEMBLY__)
 typedef unsigned long IA64FAULT;
 typedef unsigned long IA64INTVECTOR;
diff -r d2f2c1c26995 -r babbdd896024 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Wed Sep 21 21:13:16 2005
+++ b/xen/include/asm-ia64/mm.h Thu Sep 22 12:59:57 2005
@@ -134,7 +134,7 @@
 
 static inline void put_page(struct pfn_info *page)
 {
-#ifdef CONFIG_VTI      // doesn't work with non-VTI in grant tables yet
+#ifdef VALIDATE_VT     // doesn't work with non-VTI in grant tables yet
     u32 nx, x, y = page->count_info;
 
     do {
@@ -152,7 +152,7 @@
 static inline int get_page(struct pfn_info *page,
                            struct domain *domain)
 {
-#ifdef CONFIG_VTI
+#ifdef VALIDATE_VT
     u64 x, nx, y = *((u64*)&page->count_info);
     u32 _domain = pickle_domptr(domain);
 
@@ -404,7 +404,6 @@
 extern unsigned long totalram_pages;
 extern int nr_swap_pages;
 
-#ifdef CONFIG_VTI
 extern unsigned long *mpt_table;
 #undef machine_to_phys_mapping
 #define machine_to_phys_mapping        mpt_table
@@ -415,34 +414,29 @@
 /* If pmt table is provided by control pannel later, we need __get_user
 * here. However if it's allocated by HV, we should access it directly
 */
-#define get_mfn_from_pfn(d, gpfn)                      \
-    ((d) == dom0 ? gpfn :                                      \
-       (gpfn <= d->arch.max_pfn ? (d)->arch.pmt[(gpfn)] :      \
-               INVALID_MFN))
 
 #define __mfn_to_gpfn(_d, mfn)                 \
     machine_to_phys_mapping[(mfn)]
 
 #define __gpfn_to_mfn(_d, gpfn)                        \
-    get_mfn_from_pfn((_d), (gpfn))
+    __gpfn_to_mfn_foreign((_d), (gpfn))
 
 #define __gpfn_invalid(_d, gpfn)                       \
-       (__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK)
+       (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_INV_MASK)
 
 #define __gpfn_valid(_d, gpfn) !__gpfn_invalid(_d, gpfn)
 
 /* Return I/O type if trye */
 #define __gpfn_is_io(_d, gpfn)                         \
        (__gpfn_valid(_d, gpfn) ?                       \
-       (__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) : 0)
+       (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) : 0)
 
 #define __gpfn_is_mem(_d, gpfn)                                \
        (__gpfn_valid(_d, gpfn) ?                       \
-       ((__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) == GPFN_MEM) : 0)
-
-
-#define __gpa_to_mpa(_d, gpa)   \
-    ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
-#endif // CONFIG_VTI
+       (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT) & GPFN_IO_MASK) == GPFN_MEM) : 0)
+
+
+//#define __gpa_to_mpa(_d, gpa)   \
+//    ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
 
 #endif /* __ASM_IA64_MM_H__ */
diff -r d2f2c1c26995 -r babbdd896024 xen/include/asm-ia64/privop.h
--- a/xen/include/asm-ia64/privop.h     Wed Sep 21 21:13:16 2005
+++ b/xen/include/asm-ia64/privop.h     Thu Sep 22 12:59:57 2005
@@ -2,11 +2,8 @@
 #define _XEN_IA64_PRIVOP_H
 
 #include <asm/ia64_int.h>
-//#ifdef CONFIG_VTI
 #include <asm/vmx_vcpu.h>
-//#else //CONFIG_VTI
 #include <asm/vcpu.h>
-//#endif //CONFIG_VTI
 
 typedef unsigned long IA64_INST;
 
diff -r d2f2c1c26995 -r babbdd896024 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Wed Sep 21 21:13:16 2005
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Thu Sep 22 12:59:57 2005
@@ -62,7 +62,7 @@
 extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);
 
 
-/* next all for CONFIG_VTI APIs definition */
+/* next all for VTI domain APIs definition */
 extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
 extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
 extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, UINT64 value);
@@ -252,12 +252,9 @@
 vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
 {
     vtime_t     *vtm;
-    
     vtm=&(vcpu->arch.arch_vmx.vtm);
     VCPU(vcpu,itm)=val;
-#ifdef CONFIG_VTI
     vtm_interruption_update(vcpu, vtm);
-#endif
     return IA64_NO_FAULT;
 }
 static inline
@@ -292,9 +289,7 @@
 IA64FAULT
 vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
 {
-#ifdef CONFIG_VTI
     guest_write_eoi(vcpu);
-#endif
     return IA64_NO_FAULT;
 }
 
@@ -304,9 +299,7 @@
 {
 
     VCPU(vcpu,itv)=val;
-#ifdef CONFIG_VTI
     vtm_set_itv(vcpu);
-#endif
     return IA64_NO_FAULT;
 }
 static inline
@@ -347,17 +340,13 @@
 static inline
 IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val)
 {
-#ifdef CONFIG_VTI
     vtm_set_itc(vcpu, val);
-#endif
     return  IA64_NO_FAULT;
 }
 static inline
 IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu,UINT64 *val)
 {
-#ifdef CONFIG_VTI
     *val = vtm_get_itc(vcpu);
-#endif
     return  IA64_NO_FAULT;
 }
 static inline
diff -r d2f2c1c26995 -r babbdd896024 xen/include/asm-ia64/xensystem.h
--- a/xen/include/asm-ia64/xensystem.h  Wed Sep 21 21:13:16 2005
+++ b/xen/include/asm-ia64/xensystem.h  Thu Sep 22 12:59:57 2005
@@ -34,7 +34,7 @@
 #define IA64_HAS_EXTRA_STATE(t) 0
 
 #undef __switch_to
-#ifdef CONFIG_VTI
+#if     1
 extern struct task_struct *vmx_ia64_switch_to (void *next_task);
 #define __switch_to(prev,next,last) do {       \
        ia64_save_fpu(prev->arch._thread.fph);  \
@@ -51,10 +51,13 @@
                if (IA64_HAS_EXTRA_STATE(next)) \
                        ia64_save_extra(next);  \
        }                                       \
-       ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
-       (last) = vmx_ia64_switch_to((next));        \
+       /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/                        \
+       (last) = ia64_switch_to((next));        \
+       if (!VMX_DOMAIN(current)){                   \
+          vcpu_set_next_timer(current);                \
+       }                                       \
 } while (0)
-#else // CONFIG_VTI
+#else
 #define __switch_to(prev,next,last) do {                                       \
        ia64_save_fpu(prev->arch._thread.fph);                                  \
        ia64_load_fpu(next->arch._thread.fph);                                  \
@@ -66,7 +69,7 @@
        (last) = ia64_switch_to((next));                                        \
        vcpu_set_next_timer(current);                                           \
 } while (0)
-#endif // CONFIG_VTI
+#endif
 
 #undef switch_to
 // FIXME SMP... see system.h, does this need to be different?
diff -r d2f2c1c26995 -r babbdd896024 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Wed Sep 21 21:13:16 2005
+++ b/xen/include/public/arch-ia64.h    Thu Sep 22 12:59:57 2005
@@ -232,13 +232,9 @@
                // FIXME: tmp[8] temp'ly being used for virtual psr.pp
          };
         };
-#if 0
-#ifdef CONFIG_VTI
        unsigned long           reserved6[3456];
        unsigned long           vmm_avail[128];
        unsigned long           reserved7[4096];
-#endif
-#endif
 } mapped_regs_t;
 
 typedef struct {

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

