[Xen-changelog] Some outstanding bug fixes found in VT merge
# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID 760f5e85c706ed0cb1510fc483fb043923382c9c
# Parent  54b112b314fe22c2cf62c79cb4f10b0ab9265c14
Some outstanding bug fixes found in VT merge

- Consistency of the region ID mangling algorithm:
  - The metaphysical RID is not mangled, so it may conflict with another
    domain's virtual RID.
  - Sometimes rr0 is mangled, but sometimes not.
  - Sometimes only the rid field is saved to saved_rr0_metaphysical, but
    sometimes the whole register value.

- NaT bit consumption happens but was handled as priv_emulate to make
  forward progress, which is definitely wrong. The source of the NaT
  consumption is fast_rfi, which does not save ar.unat again after
  spilling guest state, and then uses the stale guest unat to fill guest
  state on return.

- In a corner case, the timer interrupt handler does not update itm and
  returns directly. When that happens, the machine timer interrupt
  disappears until a guest timer interrupt sets v_itm actively. A VTI
  domain depends on ac_timer, which stops under the above condition, so
  if the current context is a VTI domain, context switches stop and the
  machine halts.

Many compatibility issues in supporting non-VTI and VTI domains together
are also solved, e.g.:

- Change the lazy PAL mapping switch to an eager switch on every domain
  switch, since a VTI domain always depends on PAL calls.
- evtchn_notify should also vcpu_wake the target domain, since a VTI
  domain may block for I/O emulation (a sketch of both this and the RID
  fix follows below). Xenolinux is free of this issue, since it is
  always runnable.

Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
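To make the first bullet concrete: the regionreg.c changes below enforce one invariant, namely that every value destined for a region register -- the metaphysical rr0 included -- passes through vmMangleRID() exactly once, and that what gets saved for later reload is the whole mangled register value, not just its rid field. A minimal sketch of that idea follows; vmMangleRID() and the rr-building pattern come from the patch, while the types and field layout here are simplified stand-ins, not the real Xen definitions:

/* Sketch only: build a metaphysical rr0 value that is safe to load and
 * safe to save for later reload.  Field layout is illustrative. */
typedef union {
    unsigned long rrval;
    struct {
        unsigned long ve  : 1;   /* VHPT walker enable     */
        unsigned long ps  : 7;   /* preferred page size    */
        unsigned long rid : 24;  /* region identifier      */
    } f;
} ia64_rr;

extern unsigned long vmMangleRID(unsigned long rrval);  /* from the patch */

unsigned long make_metaphysical_rr0(unsigned long rid, unsigned long page_shift)
{
    ia64_rr rrv;

    rrv.rrval = 0;           /* clear reserved bits first, or loading the
                                value may raise a reserved bit fault */
    rrv.f.rid = rid;
    rrv.f.ps  = page_shift;
    rrv.f.ve  = 0;
    /* Mangle once, and return the whole mangled value so every saved
     * copy (e.g. metaphysical_saved_rr0) needs no further transform. */
    return vmMangleRID(rrv.rrval);
}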
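And for the evtchn_notify bullet, the final shape of the function after the xen/include/asm-ia64/event.h hunk below is easier to read straight than as a diff. The names here are the ones used in the patch; this is just the patched code restated, not an independent implementation:

/* evtchn_notify() after this patch: unblock the target vcpu first (a
 * VTI domain may be blocked waiting for I/O emulation), IPI its CPU
 * only if it was already running, and pend the event-channel vector
 * directly only for non-VMX (paravirtualized) vcpus. */
static inline void evtchn_notify(struct vcpu *v)
{
    /* Snapshot "running" before the unblock, so we do not send a
     * needless IPI to a vcpu we ourselves just made runnable. */
    int running = test_bit(_VCPUF_running, &v->vcpu_flags);

    vcpu_unblock(v);
    if ( running )
        smp_send_event_check_cpu(v->processor);

    if ( !VMX_DOMAIN(v) )
        vcpu_pend_interrupt(v, v->vcpu_info->arch.evtchn_vector);
}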
diff -r 54b112b314fe -r 760f5e85c706 xen/Rules.mk
--- a/xen/Rules.mk	Wed Oct 12 23:12:59 2005
+++ b/xen/Rules.mk	Thu Oct 13 20:24:45 2005
@@ -47,6 +47,7 @@
 include $(BASEDIR)/arch/$(TARGET_ARCH)/Rules.mk
+CFLAGS += -D__HYPERVISOR__
 ifneq ($(debug),y)
 CFLAGS += -DNDEBUG
 ifeq ($(verbose),y)
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/Rules.mk
--- a/xen/arch/ia64/Rules.mk	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/Rules.mk	Thu Oct 13 20:24:45 2005
@@ -24,7 +24,7 @@
 	-I$(BASEDIR)/include/asm-ia64/linux-null \
 	-I$(BASEDIR)/arch/ia64/linux -I$(BASEDIR)/arch/ia64/linux-xen
 CFLAGS += -Wno-pointer-arith -Wredundant-decls
-CFLAGS += -DIA64 -DXEN -DLINUX_2_6
+CFLAGS += -DIA64 -DXEN -DLINUX_2_6 -DV_IOSAPIC_READY
 CFLAGS += -ffixed-r13 -mfixed-range=f12-f15,f32-f127
 CFLAGS += -w -g
 ifeq ($(VALIDATE_VT),y)
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/asm-offsets.c	Thu Oct 13 20:24:45 2005
@@ -59,6 +59,8 @@
 	DEFINE(XSI_BANKNUM_OFS, offsetof(mapped_regs_t, banknum));
 	DEFINE(XSI_BANK0_OFS, offsetof(mapped_regs_t, bank0_regs[0]));
 	DEFINE(XSI_BANK1_OFS, offsetof(mapped_regs_t, bank1_regs[0]));
+	DEFINE(XSI_B0NATS_OFS, offsetof(mapped_regs_t, vbnat));
+	DEFINE(XSI_B1NATS_OFS, offsetof(mapped_regs_t, vnat));
 	DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
 	DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
 	DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(mapped_regs_t, precover_ifs));
@@ -79,6 +81,7 @@
 	//DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
 	//DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
 	//DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
+	DEFINE(IA64_PGD, offsetof(struct domain, arch.mm));
 	DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct vcpu, arch._thread.ksp));
 	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct vcpu, arch._thread.on_ustack));
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/linux-xen/entry.S
--- a/xen/arch/ia64/linux-xen/entry.S	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/linux-xen/entry.S	Thu Oct 13 20:24:45 2005
@@ -900,10 +900,17 @@
 	adds r7 = PT(EML_UNAT)+16,r12
 	;;
 	ld8 r7 = [r7]
+	;;
+#if 0
+leave_kernel_self:
+	cmp.ne p8,p0 = r0, r7
+(p8)	br.sptk.few leave_kernel_self
+	;;
+#endif
(p6)	br.call.sptk.many b0=deliver_pending_interrupt
 	;;
 	mov ar.pfs=loc0
-	mov ar.unat=r7  /* load eml_unat */
+	mov ar.unat=r7	/* load eml_unat */
 	mov r31=r0
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/vmx/vmx_entry.S	Thu Oct 13 20:24:45 2005
@@ -720,11 +720,11 @@
 
    // re-pin mappings for guest_vhpt
 
-   mov r24=IA64_TR_VHPT
+   mov r24=IA64_TR_PERVP_VHPT
    movl r25=PAGE_KERNEL
    ;;
    or loc5 = r25,loc5          // construct PA | page properties
-   mov r23 = VCPU_TLB_SHIFT<<2
+   mov r23 = IA64_GRANULE_SHIFT <<2
    ;;
    ptr.d   in3,r23
    ;;
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/vmx/vmx_init.c	Thu Oct 13 20:24:45 2005
@@ -47,6 +47,7 @@
 #include <asm/processor.h>
 #include <asm/vmx.h>
 #include <xen/mm.h>
+#include <public/arch-ia64.h>
 
 /* Global flag to identify whether Intel vmx feature is on */
 u32 vmx_enabled = 0;
@@ -134,39 +135,6 @@
 	/* Init stub for rr7 switch */
 	vmx_init_double_mapping_stub();
 #endif
-}
-
-void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c)
-{
-	struct domain *d = v->domain;
-	shared_iopage_t *sp;
-
-	ASSERT(d != dom0); /* only for non-privileged vti domain */
-	d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
-	sp = get_sp(d);
-	memset((char *)sp,0,PAGE_SIZE);
-	/* FIXME: temp due to old CP */
-	sp->sp_global.eport = 2;
-#ifdef V_IOSAPIC_READY
-	sp->vcpu_number = 1;
-#endif
-	/* TEMP */
-	d->arch.vmx_platform.pib_base = 0xfee00000UL;
-
-	/* One more step to enable interrupt assist */
-	set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
-	/* Only open one port for I/O and interrupt emulation */
-	if (v == d->vcpu[0]) {
-	    memset(&d->shared_info->evtchn_mask[0], 0xff,
-		sizeof(d->shared_info->evtchn_mask));
-	    clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
-	}
-
-	/* FIXME: only support PMT table continuously by far */
-//	d->arch.pmt = __va(c->pt_base);
-
-
-	vmx_final_setup_domain(d);
 }
 
 typedef union {
@@ -376,40 +344,6 @@
 	/* Other vmx specific initialization work */
 }
 
-/*
- * Following stuff should really move to domain builder. However currently
- * XEN/IA64 doesn't export physical -> machine page table to domain builder,
- * instead only the copy. Also there's no hypercall to notify hypervisor
- * IO ranges by far. Let's enhance it later.
- */
-
-#define MEM_G (1UL << 30)
-#define MEM_M (1UL << 20)
-
-#define MMIO_START (3 * MEM_G)
-#define MMIO_SIZE (512 * MEM_M)
-
-#define VGA_IO_START 0xA0000UL
-#define VGA_IO_SIZE 0x20000
-
-#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
-#define LEGACY_IO_SIZE (64*MEM_M)
-
-#define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE)
-#define IO_PAGE_SIZE PAGE_SIZE
-
-#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
-#define STORE_PAGE_SIZE PAGE_SIZE
-
-#define IO_SAPIC_START 0xfec00000UL
-#define IO_SAPIC_SIZE 0x100000
-
-#define PIB_START 0xfee00000UL
-#define PIB_SIZE 0x100000
-
-#define GFW_START (4*MEM_G -16*MEM_M)
-#define GFW_SIZE (16*MEM_M)
-
 typedef struct io_range {
 	unsigned long start;
 	unsigned long size;
@@ -424,17 +358,25 @@
 	{PIB_START, PIB_SIZE, GPFN_PIB},
 };
 
-#define VMX_SYS_PAGES	(2 + GFW_SIZE >> PAGE_SHIFT)
+#define VMX_SYS_PAGES	(2 + (GFW_SIZE >> PAGE_SHIFT))
 #define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
 
 int vmx_alloc_contig_pages(struct domain *d)
 {
-	unsigned int order, i, j;
-	unsigned long start, end, pgnr, conf_nr;
+	unsigned int order;
+	unsigned long i, j, start, end, pgnr, conf_nr;
 	struct pfn_info *page;
 	struct vcpu *v = d->vcpu[0];
 
 	ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
+
+	/* Mark I/O ranges */
+	for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
+		for (j = io_ranges[i].start;
+		     j < io_ranges[i].start + io_ranges[i].size;
+		     j += PAGE_SIZE)
+			map_domain_page(d, j, io_ranges[i].type);
+	}
 
 	conf_nr = VMX_CONFIG_PAGES(d);
 	order = get_order_from_pages(conf_nr);
@@ -462,10 +404,20 @@
 
 	d->arch.max_pfn = end >> PAGE_SHIFT;
 
-	order = get_order_from_pages(VMX_SYS_PAGES);
+	order = get_order_from_pages(GFW_SIZE >> PAGE_SHIFT);
 	if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
 		printk("Could not allocate order=%d pages for vmx contig alloc\n",
 			order);
+		return -1;
+	}
+
+	/* Map guest firmware */
+	pgnr = page_to_pfn(page);
+	for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
+		map_domain_page(d, i, pgnr << PAGE_SHIFT);
+
+	if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
+		printk("Could not allocate order=1 pages for vmx contig alloc\n");
 		return -1;
 	}
 
@@ -474,20 +426,42 @@
 	map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
 	pgnr++;
 	map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
-	pgnr++;
-
-	/* Map guest firmware */
-	for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
-		map_domain_page(d, i, pgnr << PAGE_SHIFT);
-
-	/* Mark I/O ranges */
-	for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
-		for (j = io_ranges[i].start;
-		     j < io_ranges[i].start + io_ranges[i].size;
-		     j += PAGE_SIZE)
-			map_domain_page(d, j, io_ranges[i].type);
-	}
 
 	set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
 	return 0;
 }
+
+void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c)
+{
+	struct domain *d = v->domain;
+	shared_iopage_t *sp;
+
+	ASSERT(d != dom0); /* only for non-privileged vti domain */
+	d->arch.vmx_platform.shared_page_va =
+		__va(__gpa_to_mpa(d, IO_PAGE_START));
+	sp = get_sp(d);
+	//memset((char *)sp,0,PAGE_SIZE);
+	//sp->sp_global.eport = 2;
+#ifdef V_IOSAPIC_READY
+	sp->vcpu_number = 1;
+#endif
+	/* TEMP */
+	d->arch.vmx_platform.pib_base = 0xfee00000UL;
+
+	/* One more step to enable interrupt assist */
+	set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
+	/* Only open one port for I/O and interrupt emulation */
+	if (v == d->vcpu[0]) {
+	    memset(&d->shared_info->evtchn_mask[0], 0xff,
+		sizeof(d->shared_info->evtchn_mask));
+	    clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
+	}
+
+	/* FIXME: only support PMT table continuously by far */
+//	d->arch.pmt = __va(c->pt_base);
+
+
+	vmx_final_setup_domain(d);
+}
+
+
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/vmx/vmx_irq_ia64.c
--- a/xen/arch/ia64/vmx/vmx_irq_ia64.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c	Thu Oct 13 20:24:45 2005
@@ -101,7 +101,10 @@
 	if (vector != IA64_TIMER_VECTOR) {
 		/* FIXME: Leave IRQ re-route later */
-		vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
+		if (!VMX_DOMAIN(dom0->vcpu[0]))
+			vcpu_pend_interrupt(dom0->vcpu[0],vector);
+		else
+			vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
 		wake_dom0 = 1;
 	}
 	else {	// FIXME: Handle Timer only now
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/vmx/vmx_process.c	Thu Oct 13 20:24:45 2005
@@ -271,7 +271,7 @@
 {
 	IA64_PSR vpsr;
 	CACHE_LINE_TYPE type;
-	u64 vhpt_adr;
+	u64 vhpt_adr, gppa;
 	ISR misr;
 	ia64_rr vrr;
 	REGS *regs;
@@ -314,9 +314,9 @@
 //	prepare_if_physical_mode(v);
 
 	if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
-		if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,data->ppn>>(PAGE_SHIFT-12))){
-			vadr=(vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
-			emulate_io_inst(v, vadr, data->ma);
+		gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
+		if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
+			emulate_io_inst(v, gppa, data->ma);
 			return IA64_FAULT;
 		}
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/vmx/vmx_support.c
--- a/xen/arch/ia64/vmx/vmx_support.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/vmx/vmx_support.c	Thu Oct 13 20:24:45 2005
@@ -158,7 +158,7 @@
 #ifdef V_IOSAPIC_READY
 	    vlapic_update_ext_irq(v);
 #else
-	    panic("IOSAPIC model is missed in qemu\n");
+	    //panic("IOSAPIC model is missed in qemu\n");
 #endif
 	    return;
 	}
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/vmx/vtlb.c	Thu Oct 13 20:24:45 2005
@@ -387,6 +387,15 @@
         thash_insert(hcb->ts->vhpt, entry, va);
         return;
     }
+
+#if 1
+    vrr=vmx_vcpu_rr(current, va);
+    if (vrr.ps != entry->ps) {
+        printk("not preferred ps with va: 0x%lx\n", va);
+        return;
+    }
+#endif
+
     flag = 1;
     gppn = (POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT;
     ppns = PAGEALIGN((entry->ppn<<12),entry->ps);
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/xen/dom0_ops.c	Thu Oct 13 20:24:45 2005
@@ -177,13 +177,8 @@
 
         for ( i = start_page; i < (start_page + nr_pages); i++ )
         {
-            page = map_new_domain_page(d, i << PAGE_SHIFT);
-            if ( page == NULL )
-            {
-                ret = -ENOMEM;
-                break;
-            }
-            pfn = page_to_pfn(page);
+            pfn = __gpfn_to_mfn_foreign(d, i);
+
             if ( put_user(pfn, buffer) )
             {
                 ret = -EFAULT;
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/xen/dom_fw.c
--- a/xen/arch/ia64/xen/dom_fw.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/xen/dom_fw.c	Thu Oct 13 20:24:45 2005
@@ -301,7 +301,7 @@
 	// pal code must be mapped by a TR when pal is called, however
 	// calls are rare enough that we will map it lazily rather than
 	// at every context switch
-	efi_map_pal_code();
+	//efi_map_pal_code();
 	switch (index) {
 	    case PAL_MEM_ATTRIB:
 		    status = ia64_pal_mem_attrib(&r9);
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/xen/domain.c	Thu Oct 13 20:24:45 2005
@@ -59,6 +59,7 @@
 // initialized by arch/ia64/setup.c:find_initrd()
 unsigned long initrd_start = 0, initrd_end = 0;
+extern unsigned long running_on_sim;
 
 #define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend))
 
@@ -190,6 +191,9 @@
 		while (1);
 	}
 	memset(d->shared_info, 0, PAGE_SIZE);
+	if (v == d->vcpu[0])
+	    memset(&d->shared_info->evtchn_mask[0], 0xff,
+		sizeof(d->shared_info->evtchn_mask));
 #if 0
 	d->vcpu[0].arch.privregs = 
 			alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
@@ -271,6 +275,14 @@
 	if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
 		return 0;
 
+	/* Sync d/i cache conservatively */
+	if (!running_on_sim) {
+	    ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
+	    if (ret != PAL_STATUS_SUCCESS)
+		panic("PAL CACHE FLUSH failed for domain.\n");
+	    printk("Sync i/d cache for dom0 image SUCC\n");
+	}
+
 	if (c->flags & VGCF_VMX_GUEST) {
 	    if (!vmx_enabled) {
 		printk("No VMX hardware feature for vmx domain.\n");
@@ -547,7 +559,8 @@
 	if (pte_present(*pte)) {
//printk("lookup_domain_page: found mapping for %lx, pte=%lx\n",mpaddr,pte_val(*pte));
 			return *(unsigned long *)pte;
-		}
+		} else if (VMX_DOMAIN(d->vcpu[0]))
+			return GPFN_INV_MASK;
 	}
 	}
 }
@@ -799,7 +812,6 @@
 	set_bit(_DOMF_physdev_access, &d->domain_flags);
 }
 
-extern unsigned long running_on_sim;
 unsigned int vmx_dom0 = 0;
 int construct_dom0(struct domain *d, 
 	               unsigned long image_start, unsigned long image_len, 
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/xen/hyperprivop.S	Thu Oct 13 20:24:45 2005
@@ -807,8 +807,11 @@
 	// OK, now all set to go except for switch to virtual bank1
 	mov r22=1;; st4 [r20]=r22;
 	mov r30=r2; mov r29=r3;;
+	adds r16=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18
 	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
 	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
+	ld8 r16=[r16];;
+	mov ar.unat=r16;;
 	bsw.1;;
 	// FIXME?: ar.unat is not really handled correctly,
 	// but may not matter if the OS is NaT-clean
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/xen/ivt.S
--- a/xen/arch/ia64/xen/ivt.S	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/xen/ivt.S	Thu Oct 13 20:24:45 2005
@@ -1460,7 +1460,28 @@
 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
 ENTRY(disabled_fp_reg)
 #ifdef XEN
+#if 0
+	mov r20=pr
+	movl r16=0x2000000000000000
+	movl r17=0x2000000000176b60
+	mov r18=cr.iip
+	mov r19=rr[r16]
+	movl r22=0xe95d0439
+	;;
+	mov pr=r0,-1
+	;;
+	cmp.eq p6,p7=r22,r19
+	;;
+(p6)	cmp.eq p8,p9=r17,r18
+(p8)	br.sptk.few floating_panic
+	;;
+	mov pr=r20,-1
+	;;
+#endif
 	REFLECT(25)
+//floating_panic:
+//	br.sptk.many floating_panic
+	;;
 #endif
 	DBG_FAULT(25)
 	rsm psr.dfh		// ensure we can access fph
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/xen/process.c	Thu Oct 13 20:24:45 2005
@@ -745,6 +745,8 @@
 	    case 26:
 		printf("*** NaT fault... attempting to handle as privop\n");
 		printf("isr=%p, ifa=%p,iip=%p,ipsr=%p\n",isr,ifa,regs->cr_iip,psr);
+		regs->eml_unat = 0;
+		return;
 		vector = priv_emulate(v,regs,isr);
 		if (vector == IA64_NO_FAULT) {
 			printf("*** Handled privop masquerading as NaT fault\n");
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/xen/regionreg.c	Thu Oct 13 20:24:45 2005
@@ -15,7 +15,8 @@
 #include <asm/regionreg.h>
 #include <asm/vhpt.h>
 #include <asm/vcpu.h>
-extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info);
+extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info, unsigned long p_vhpt, unsigned long v_pal);
+extern void *pal_vaddr;
 
 #define	IA64_MIN_IMPL_RID_BITS	(IA64_MIN_IMPL_RID_MSB+1)
@@ -66,9 +67,12 @@
 {
 	ia64_rr rrv;
 
+	rrv.rrval = 0;	// Or else may see reserved bit fault
 	rrv.rid = allocate_reserved_rid();
 	rrv.ps = PAGE_SHIFT;
 	rrv.ve = 0;
+	/* Mangle metaphysical rid */
+	rrv.rrval = vmMangleRID(rrv.rrval);
 	return rrv.rrval;
 }
 
@@ -213,6 +217,7 @@
 	unsigned long rreg = REGION_NUMBER(rr);
 	ia64_rr rrv, newrrv, memrrv;
 	unsigned long newrid;
+	extern unsigned long vhpt_paddr;
 
 	if (val == -1) return 1;
 
@@ -250,9 +255,10 @@
 	newrrv.rid = newrid;
 	newrrv.ve = 1;  // VHPT now enabled for region 7!!
 	newrrv.ps = PAGE_SHIFT;
-	if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
+	if (rreg == 0) v->arch.metaphysical_saved_rr0 =
+		vmMangleRID(newrrv.rrval);
 	if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
-				v->arch.privregs);
+				v->arch.privregs, vhpt_paddr, pal_vaddr);
 	else set_rr(rr,newrrv.rrval);
 #endif
 	return 1;
@@ -265,7 +271,8 @@
 	ia64_rr rrv;
 
 //	rrv.ve = 1;  FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
-	set_rr(0,v->arch.metaphysical_rr0);
+	ia64_set_rr(0,v->arch.metaphysical_rr0);
+	ia64_srlz_d();
 }
 
 // validates/changes region registers 0-6 in the currently executing domain
@@ -290,7 +297,7 @@
 	ia64_rr rrv;
 
 	rrv.rrval = 0;
-	rrv.rrval = v->domain->arch.metaphysical_rr0;
+	//rrv.rrval = v->domain->arch.metaphysical_rr0;
 	rrv.ps = PAGE_SHIFT;
 	rrv.ve = 1;
 	if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
@@ -343,12 +350,16 @@
 
 	if (VCPU(v,metaphysical_mode)) {
 		ia64_rr rrv;
 
+#if 0
 		rrv.rrval = 0;
 		rrv.rid = v->domain->arch.metaphysical_rr0;
 		rrv.ps = PAGE_SHIFT;
 		rrv.ve = 1;
 		rr0 = rrv.rrval;
 		set_rr_no_srlz(0x0000000000000000L, rr0);
+#endif
+		rr0 = v->domain->arch.metaphysical_rr0;
+		ia64_set_rr(0x0000000000000000L, rr0);
 		ia64_srlz_d();
 	}
 	else {
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/xen/vcpu.c	Thu Oct 13 20:24:45 2005
@@ -775,6 +775,7 @@
 	}
 #ifdef HEARTBEAT_FREQ
 	if (domid >= N_DOMS) domid = N_DOMS-1;
+#if 0
 	if (vector == (PSCB(vcpu,itv) & 0xff)) {
 	    if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
 		printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
@@ -783,6 +784,7 @@
 		//dump_runq();
 	    }
 	}
+#endif
 	else nonclockcount[domid]++;
 #endif
 	// now have an unmasked, pending, deliverable vector!
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/xen/xenasm.S
--- a/xen/arch/ia64/xen/xenasm.S	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/xen/xenasm.S	Thu Oct 13 20:24:45 2005
@@ -48,11 +48,11 @@
 // FIXME? Note that this turns off the DB bit (debug)
 #define PSR_BITS_TO_SET	IA64_PSR_BN
 
-//extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info);
+//extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info, unsigned long p_vhpt, unsigned long v_pal);
 GLOBAL_ENTRY(ia64_new_rr7)
 	// not sure this unwind statement is correct...
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
-	alloc loc1 = ar.pfs, 3, 8, 0, 0
+	alloc loc1 = ar.pfs, 5, 9, 0, 0
1:	{
	  mov r28  = in0		// copy procedure index
	  mov r8   = ip			// save ip to compute branch
@@ -63,10 +63,12 @@
 	;;
 	tpa loc2=loc2			// grab this BEFORE changing rr7
 	;;
+	dep loc8=0,in4,60,4
+	;;
 #if VHPT_ENABLED
-	movl loc6=VHPT_ADDR
-	;;
-	tpa loc6=loc6			// grab this BEFORE changing rr7
+	mov loc6=in3
+	;;
+	//tpa loc6=loc6			// grab this BEFORE changing rr7
 	;;
 #endif
 	mov loc5=in1
@@ -229,6 +231,21 @@
 	mov r25=IA64_TR_ARCH_INFO
 	;;
 	itr.d dtr[r25]=r23		// wire in new mapping...
+	;;
+
+	//Purge/insert PAL TR
+	mov r24=IA64_TR_PALCODE
+	movl r25=PAGE_KERNEL
+	;;
+	or loc8=r25,loc8
+	mov r23=IA64_GRANULE_SHIFT<<2
+	;;
+	ptr.i	in4,r23
+	;;
+	mov cr.itir=r23
+	mov cr.ifa=in4
+	;;
+	itr.i itr[r24]=loc8
 	;;
 
 	// done, switch back to virtual and return
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/xen/xenmisc.c	Thu Oct 13 20:24:45 2005
@@ -17,6 +17,7 @@
 #include <asm/io.h>
 #include <xen/softirq.h>
 #include <public/sched.h>
+#include <asm/vhpt.h>
 
 efi_memory_desc_t ia64_efi_io_md;
 EXPORT_SYMBOL(ia64_efi_io_md);
@@ -310,9 +311,13 @@
 if (!i--) { printk("+",id); i = 1000000; }
 }
 
-	if (VMX_DOMAIN(current)){
+	if (VMX_DOMAIN(current)){
 		vmx_load_all_rr(current);
 	}else{
+		extern char ia64_ivt;
+		ia64_set_iva(&ia64_ivt);
+		ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
+			VHPT_ENABLED);
 		if (!is_idle_task(current->domain)) {
 			load_region_regs(current);
 			if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
diff -r 54b112b314fe -r 760f5e85c706 xen/arch/ia64/xen/xentime.c
--- a/xen/arch/ia64/xen/xentime.c	Wed Oct 12 23:12:59 2005
+++ b/xen/arch/ia64/xen/xentime.c	Thu Oct 13 20:24:45 2005
@@ -99,6 +99,7 @@
 {
 	unsigned long new_itm, old_itc;
 
+#if 0
 #define HEARTBEAT_FREQ 16	// period in seconds
 #ifdef HEARTBEAT_FREQ
 	static long count = 0;
@@ -110,6 +111,7 @@
 		count = 0;
 	}
 #endif
+#endif
 	if (current->domain == dom0) {
 		// FIXME: there's gotta be a better way of doing this...
 		// We have to ensure that domain0 is launched before we
@@ -117,12 +119,14 @@
 		//domain0_ready = 1; // moved to xensetup.c
 		VCPU(current,pending_interruption) = 1;
 	}
-	if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
-		vcpu_pend_timer(dom0->vcpu[0]);
-		//vcpu_set_next_timer(dom0->vcpu[0]);
-		vcpu_wake(dom0->vcpu[0]);
-	}
-	if (!is_idle_task(current->domain) && current->domain != dom0) {
+	if (domain0_ready && current->domain != dom0) {
+		if(vcpu_timer_expired(dom0->vcpu[0])) {
+			vcpu_pend_timer(dom0->vcpu[0]);
+			//vcpu_set_next_timer(dom0->vcpu[0]);
+			vcpu_wake(dom0->vcpu[0]);
+		}
+	}
+	if (!is_idle_task(current->domain)) {
 		if (vcpu_timer_expired(current)) {
 			vcpu_pend_timer(current);
 			// ensure another timer interrupt happens even if domain doesn't
@@ -132,8 +136,11 @@
 	}
 
 	new_itm = local_cpu_data->itm_next;
-	if (!time_after(ia64_get_itc(), new_itm))
+	if (!VMX_DOMAIN(current) && !time_after(ia64_get_itc(), new_itm))
 		return;
+
+	if (VMX_DOMAIN(current))
+		vcpu_wake(current);
 
 	while (1) {
 		new_itm += local_cpu_data->itm_delta;
diff -r 54b112b314fe -r 760f5e85c706 xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h	Wed Oct 12 23:12:59 2005
+++ b/xen/include/asm-ia64/config.h	Thu Oct 13 20:24:45 2005
@@ -102,7 +102,7 @@
 #endif
 
 // xen/include/asm/config.h
-#define	HZ 100
+//#define	HZ 1000
 // FIXME SMP: leave SMP for a later time
 #define barrier() __asm__ __volatile__("": : :"memory")
diff -r 54b112b314fe -r 760f5e85c706 xen/include/asm-ia64/event.h
--- a/xen/include/asm-ia64/event.h	Wed Oct 12 23:12:59 2005
+++ b/xen/include/asm-ia64/event.h	Thu Oct 13 20:24:45 2005
@@ -14,6 +14,21 @@
 
 static inline void evtchn_notify(struct vcpu *v)
 {
+    /*
+     * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
+     * pending flag. These values may fluctuate (after all, we hold no
+     * locks) but the key insight is that each change will cause
+     * evtchn_upcall_pending to be polled.
+     *
+     * NB2. We save VCPUF_running across the unblock to avoid a needless
+     * IPI for domains that we IPI'd to unblock.
+     */
+    int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+    vcpu_unblock(v);
+    if ( running )
+        smp_send_event_check_cpu(v->processor);
+
+    if(!VMX_DOMAIN(v))
 	vcpu_pend_interrupt(v, v->vcpu_info->arch.evtchn_vector);
 }
 
diff -r 54b112b314fe -r 760f5e85c706 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h	Wed Oct 12 23:12:59 2005
+++ b/xen/include/asm-ia64/mm.h	Thu Oct 13 20:24:45 2005
@@ -405,6 +405,7 @@
 extern int nr_swap_pages;
 
 extern unsigned long *mpt_table;
+extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
 #undef machine_to_phys_mapping
 #define machine_to_phys_mapping	mpt_table
 
@@ -433,10 +434,10 @@
 
 #define __gpfn_is_mem(_d, gpfn)				\
 (__gpfn_valid(_d, gpfn) ?				\
-	(lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT) & GPFN_IO_MASK) == GPFN_MEM) : 0)
-
-
-//#define __gpa_to_mpa(_d, gpa) \
-//	((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
+	((lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) == GPFN_MEM) : 0)
+
+
+#define __gpa_to_mpa(_d, gpa) \
+	((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
 
 #endif /* __ASM_IA64_MM_H__ */
diff -r 54b112b314fe -r 760f5e85c706 xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h	Wed Oct 12 23:12:59 2005
+++ b/xen/include/asm-ia64/vmx.h	Thu Oct 13 20:24:45 2005
@@ -24,6 +24,7 @@
 #define RR7_SWITCH_SHIFT	12	/* 4k enough */
 
 #include <public/io/ioreq.h>
+
 extern void identify_vmx_feature(void);
 extern unsigned int vmx_enabled;
diff -r 54b112b314fe -r 760f5e85c706 xen/include/asm-ia64/xenkregs.h
--- a/xen/include/asm-ia64/xenkregs.h	Wed Oct 12 23:12:59 2005
+++ b/xen/include/asm-ia64/xenkregs.h	Thu Oct 13 20:24:45 2005
@@ -6,7 +6,8 @@
  */
 #define IA64_TR_SHARED_INFO	3	/* dtr3: page shared with domain */
 #define	IA64_TR_VHPT		4	/* dtr4: vhpt */
-#define IA64_TR_ARCH_INFO	5
+#define IA64_TR_ARCH_INFO	5
+#define IA64_TR_PERVP_VHPT	6
 
 /* Processor status register bits: */
 #define IA64_PSR_VM_BIT	46
diff -r 54b112b314fe -r 760f5e85c706 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h	Wed Oct 12 23:12:59 2005
+++ b/xen/include/public/arch-ia64.h	Thu Oct 13 20:24:45 2005
@@ -37,6 +37,33 @@
 #define GPFN_INV_MASK           (31UL << 59)    /* Guest pfn is invalid */
 
 #define INVALID_MFN             (~0UL)
+
+#define MEM_G   (1UL << 30)
+#define MEM_M   (1UL << 20)
+
+#define MMIO_START       (3 * MEM_G)
+#define MMIO_SIZE        (512 * MEM_M)
+
+#define VGA_IO_START     0xA0000UL
+#define VGA_IO_SIZE      0x20000
+
+#define LEGACY_IO_START  (MMIO_START + MMIO_SIZE)
+#define LEGACY_IO_SIZE   (64*MEM_M)
+
+#define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE)
+#define IO_PAGE_SIZE  PAGE_SIZE
+
+#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
+#define STORE_PAGE_SIZE  PAGE_SIZE
+
+#define IO_SAPIC_START   0xfec00000UL
+#define IO_SAPIC_SIZE    0x100000
+
+#define PIB_START 0xfee00000UL
+#define PIB_SIZE 0x100000
+
+#define GFW_START        (4*MEM_G -16*MEM_M)
+#define GFW_SIZE         (16*MEM_M)
 
 /*
  * NB. This may become a 64-bit count with no shift. If this happens then the
diff -r 54b112b314fe -r 760f5e85c706 xen/include/public/io/ioreq.h
--- a/xen/include/public/io/ioreq.h	Wed Oct 12 23:12:59 2005
+++ b/xen/include/public/io/ioreq.h	Thu Oct 13 20:24:45 2005
@@ -35,6 +35,13 @@
 #define IOREQ_TYPE_OR		3
 #define IOREQ_TYPE_XOR		4
 
+#ifdef __HYPERVISOR__
+#include <public/io/vmx_vlapic.h>
+#else
+#include <xen/io/vmx_vlapic.h>
+#endif
+
+
 /*
  * VMExit dispatcher should cooperate with instruction decoder to
  * prepare this structure and notify service OS and DM by sending
@@ -55,10 +62,6 @@
     u8 type;    /* I/O type */
 } ioreq_t;
 
-#define MAX_VECTOR    256
-#define BITS_PER_BYTE   8
-#define INTR_LEN (MAX_VECTOR/(BITS_PER_BYTE * sizeof(u64)))
-
 typedef struct {
     u64   pic_intr[INTR_LEN];
     u64   pic_mask[INTR_LEN];
@@ -67,10 +70,11 @@
 
 typedef struct {
     ioreq_t         vp_ioreq;
-    unsigned long   vp_intr[INTR_LEN];
+    vl_apic_info    apic_intr;
 } vcpu_iodata_t;
 
 typedef struct {
+    int vcpu_number;
     global_iodata_t sp_global;
     vcpu_iodata_t   vcpu_iodata[1];
 } shared_iopage_t;
diff -r 54b112b314fe -r 760f5e85c706 xen/include/public/io/vmx_vlapic.h
--- a/xen/include/public/io/vmx_vlapic.h	Wed Oct 12 23:12:59 2005
+++ b/xen/include/public/io/vmx_vlapic.h	Thu Oct 13 20:24:45 2005
@@ -1,57 +1,29 @@
 #ifndef _VMX_VLAPIC_H
 #define _VMX_VLAPIC_H
 
-/*
-   We extended one bit for PIC type
- */
 #define VLAPIC_DELIV_MODE_FIXED		0x0
 #define VLAPIC_DELIV_MODE_LPRI		0x1
 #define VLAPIC_DELIV_MODE_SMI		0x2
+#define VLAPIC_DELIV_MODE_PMI		0x2
 #define VLAPIC_DELIV_MODE_NMI		0x4
 #define VLAPIC_DELIV_MODE_INIT		0x5
 #define VLAPIC_DELIV_MODE_STARTUP	0x6
 #define VLAPIC_DELIV_MODE_EXT		0x7
 #define VLAPIC_DELIV_MODE_MASK		0x8
 
-#define VLAPIC_MSG_LEVEL		4
-
-#define INTR_EXT	0
-#define INTR_APIC	1
-#define INTR_LAPIC	2
-
-#define VL_STATE_EOI		1
-#define VL_STATE_EXT_LOCK	2
-#define VL_STATE_MSG_LOCK	3
-#define VL_STATE_EOI_LOCK	3
-
-#define VLOCAL_APIC_MAX_INTS		256
-#define VLAPIC_INT_COUNT	(VLOCAL_APIC_MAX_INTS/(BITS_PER_BYTE * sizeof(u64)))
-#define VLAPIC_INT_COUNT_32	(VLOCAL_APIC_MAX_INTS/(BITS_PER_BYTE * sizeof(u32)))
-
-struct vapic_bus_message{
-    u8   deliv_mode:4;   /* deliver mode, including fixed, LPRI, etc */
-    u8   level:1;        /* level or edge */
-    u8   trig_mod:1;     /* assert or disassert */
-    u8   reserved:2;
-    u8   vector;
-};
+#define MAX_VECTOR    256
+#define BITS_PER_BYTE   8
+#define INTR_LEN (MAX_VECTOR/(BITS_PER_BYTE * sizeof(u64)))
+#define INTR_LEN_32 (MAX_VECTOR/(BITS_PER_BYTE * sizeof(u32)))
 
 typedef struct {
-    /* interrupt for PIC and ext type IOAPIC interrupt */
-    u64   vl_ext_intr[VLAPIC_INT_COUNT];
-    u64   vl_ext_intr_mask[VLAPIC_INT_COUNT];
-    u64   vl_apic_intr[VLAPIC_INT_COUNT];
-    u64   vl_apic_tmr[VLAPIC_INT_COUNT];
-    u64   vl_eoi[VLAPIC_INT_COUNT];
     u32   vl_lapic_id;
-    u32   direct_intr;
     u32   vl_apr;
     u32   vl_logical_dest;
     u32   vl_dest_format;
    u32   vl_arb_id;
-    u32   vl_state;
-    u32   apic_msg_count;
-    struct vapic_bus_message vl_apic_msg[24];
-} vlapic_info;
+    u64   irr[INTR_LEN];
+    u64   tmr[INTR_LEN];
+}vl_apic_info;
 
 #endif /* _VMX_VLAPIC_H_ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog