[Xen-changelog] [xen-3.1-testing] Simplify APIC_ACCESS VMX support.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1198795859 0
# Node ID b14bedd3139094b010439ea40c2d7297154be4ad
# Parent  044723b2ab496c31bafcd66c1fbb643f8ca65250
Simplify APIC_ACCESS VMX support.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
xen-unstable changeset:   15186:96a59a5ae656
xen-unstable date:        Wed May 30 17:01:26 2007 +0100
---
 xen/arch/x86/domain.c              |    6 -
 xen/arch/x86/hvm/hvm.c             |   23 ++--
 xen/arch/x86/hvm/svm/svm.c         |   11 ++
 xen/arch/x86/hvm/vmx/intr.c        |    5 
 xen/arch/x86/hvm/vmx/vmcs.c        |   79 +++++----------
 xen/arch/x86/hvm/vmx/vmx.c         |  192 +++++++++++++++----------------------
 xen/include/asm-x86/hvm/domain.h   |    7 -
 xen/include/asm-x86/hvm/hvm.h      |    6 -
 xen/include/asm-x86/hvm/vlapic.h   |    6 -
 xen/include/asm-x86/hvm/vmx/vmcs.h |   39 +++----
 xen/include/asm-x86/hvm/vmx/vmx.h  |    3 
 xen/include/asm-x86/msr.h          |   12 +-
 12 files changed, 169 insertions(+), 220 deletions(-)

diff -r 044723b2ab49 -r b14bedd31390 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c	Thu Dec 27 22:34:52 2007 +0000
+++ b/xen/arch/x86/domain.c	Thu Dec 27 22:50:59 2007 +0000
@@ -412,7 +412,7 @@ int arch_domain_create(struct domain *d)
     int i;
 #endif
     l1_pgentry_t gdt_l1e;
-    int vcpuid, pdpt_order;
+    int vcpuid, pdpt_order, paging_initialised = 0;
     int rc = -ENOMEM;
 
     pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
@@ -463,6 +463,7 @@ int arch_domain_create(struct domain *d)
 #endif
 
     paging_domain_init(d);
+    paging_initialised = 1;
 
     if ( !is_idle_domain(d) )
     {
@@ -490,12 +491,13 @@ int arch_domain_create(struct domain *d)
         d->arch.is_32bit_pv = d->arch.has_32bit_shinfo =
             (CONFIG_PAGING_LEVELS != 4);
     }
-
 
     return 0;
 
  fail:
     free_xenheap_page(d->shared_info);
+    if ( paging_initialised )
+        paging_final_teardown(d);
 #ifdef __x86_64__
     if ( d->arch.mm_perdomain_l2 )
         free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
diff -r 044723b2ab49 -r b14bedd31390 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Thu Dec 27 22:34:52 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c	Thu Dec 27 22:50:59 2007 +0000
@@ -224,7 +224,6 @@ int hvm_domain_initialise(struct domain 
 
     spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
-    spin_lock_init(&d->arch.hvm_domain.vapic_access_lock);
 
     rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
     if ( rc != 0 )
@@ -236,7 +235,7 @@ int hvm_domain_initialise(struct domain 
     hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq);
     hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
 
-    return 0;
+    return hvm_funcs.domain_initialise(d);
 }
 
@@ -252,6 +251,7 @@ void hvm_domain_relinquish_resources(str
 
 void hvm_domain_destroy(struct domain *d)
 {
+    hvm_funcs.domain_destroy(d);
 }
 
 static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
@@ -437,16 +437,17 @@ int hvm_vcpu_initialise(struct vcpu *v)
     spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
     INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
 
-    if ( v->vcpu_id != 0 )
-        return 0;
-
-    pit_init(v, cpu_khz);
-    rtc_init(v, RTC_PORT(0));
-    pmtimer_init(v);
-    hpet_init(v);
+    if ( v->vcpu_id == 0 )
+    {
+        /* NB. All these really belong in hvm_domain_initialise(). */
+        pit_init(v, cpu_khz);
+        rtc_init(v, RTC_PORT(0));
+        pmtimer_init(v);
+        hpet_init(v);
 
-    /* Init guest TSC to start from zero. */
-    hvm_set_guest_time(v, 0);
+        /* Init guest TSC to start from zero. */
+        hvm_set_guest_time(v, 0);
+    }
 
     return 0;
 }
diff -r 044723b2ab49 -r b14bedd31390 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Thu Dec 27 22:34:52 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c	Thu Dec 27 22:50:59 2007 +0000
@@ -950,6 +950,15 @@ static void svm_do_resume(struct vcpu *v
     reset_stack_and_jump(svm_asm_do_resume);
 }
 
+static int svm_domain_initialise(struct domain *d)
+{
+    return 0;
+}
+
+static void svm_domain_destroy(struct domain *d)
+{
+}
+
 static int svm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
@@ -994,6 +1003,8 @@ static struct hvm_function_table svm_fun
 static struct hvm_function_table svm_function_table = {
     .name                 = "SVM",
     .disable              = stop_svm,
+    .domain_initialise    = svm_domain_initialise,
+    .domain_destroy       = svm_domain_destroy,
     .vcpu_initialise      = svm_vcpu_initialise,
     .vcpu_destroy         = svm_vcpu_destroy,
     .store_cpu_guest_regs = svm_store_cpu_guest_regs,
diff -r 044723b2ab49 -r b14bedd31390 xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c	Thu Dec 27 22:34:52 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/intr.c	Thu Dec 27 22:50:59 2007 +0000
@@ -88,11 +88,6 @@ static void update_tpr_threshold(struct 
 
     if ( !cpu_has_vmx_tpr_shadow )
         return;
-
-#ifdef __i386__
-    if ( !vlapic->mmap_vtpr_enabled )
-        return;
-#endif
 
     if ( !vlapic_enabled(vlapic) || 
          ((max_irr = vlapic_find_highest_irr(vlapic)) == -1) )
diff -r 044723b2ab49 -r b14bedd31390 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Thu Dec 27 22:34:52 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Thu Dec 27 22:50:59 2007 +0000
@@ -63,9 +63,6 @@ static u32 adjust_vmx_controls(u32 ctl_m
     return ctl;
 }
 
-#define vmx_has_secondary_exec_ctls \
-    (_vmx_cpu_based_exec_control & ACTIVATE_SECONDARY_CONTROLS)
-
 void vmx_init_vmcs_config(void)
 {
     u32 vmx_msr_low, vmx_msr_high, min, opt;
@@ -79,7 +76,7 @@ void vmx_init_vmcs_config(void)
            PIN_BASED_NMI_EXITING);
     opt = 0; /*PIN_BASED_VIRTUAL_NMIS*/
     _vmx_pin_based_exec_control = adjust_vmx_controls(
-        min, opt, MSR_IA32_VMX_PINBASED_CTLS_MSR);
+        min, opt, MSR_IA32_VMX_PINBASED_CTLS);
 
     min = (CPU_BASED_HLT_EXITING |
            CPU_BASED_INVLPG_EXITING |
@@ -88,24 +85,21 @@ void vmx_init_vmcs_config(void)
            CPU_BASED_MOV_DR_EXITING |
            CPU_BASED_ACTIVATE_IO_BITMAP |
           CPU_BASED_USE_TSC_OFFSETING);
-    opt  = CPU_BASED_ACTIVATE_MSR_BITMAP;
+    opt  = CPU_BASED_ACTIVATE_MSR_BITMAP;
     opt |= CPU_BASED_TPR_SHADOW;
-    opt |= ACTIVATE_SECONDARY_CONTROLS;
+    opt |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
     _vmx_cpu_based_exec_control = adjust_vmx_controls(
-        min, opt, MSR_IA32_VMX_PROCBASED_CTLS_MSR);
+        min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
 #ifdef __x86_64__
     if ( !(_vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW) )
     {
         min |= CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING;
         _vmx_cpu_based_exec_control = adjust_vmx_controls(
-            min, opt, MSR_IA32_VMX_PROCBASED_CTLS_MSR);
-    }
-#elif defined(__i386__)
-    if ( !vmx_has_secondary_exec_ctls )
-        _vmx_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
-#endif
-
-    if ( vmx_has_secondary_exec_ctls )
+            min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
+    }
+#endif
+
+    if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
     {
         min = 0;
         opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
@@ -113,27 +107,33 @@ void vmx_init_vmcs_config(void)
             min, opt, MSR_IA32_VMX_PROCBASED_CTLS2);
     }
 
+#if defined(__i386__)
+    /* If we can't virtualise APIC accesses, the TPR shadow is pointless. */
+    if ( !(_vmx_secondary_exec_control &
+           SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) )
+        _vmx_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
+#endif
+
     min = VM_EXIT_ACK_INTR_ON_EXIT;
     opt = 0;
 #ifdef __x86_64__
     min |= VM_EXIT_IA32E_MODE;
 #endif
     _vmx_vmexit_control = adjust_vmx_controls(
-        min, opt, MSR_IA32_VMX_EXIT_CTLS_MSR);
+        min, opt, MSR_IA32_VMX_EXIT_CTLS);
 
     min = opt = 0;
     _vmx_vmentry_control = adjust_vmx_controls(
-        min, opt, MSR_IA32_VMX_ENTRY_CTLS_MSR);
-
-    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
+        min, opt, MSR_IA32_VMX_ENTRY_CTLS);
+
+    rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
 
     if ( smp_processor_id() == 0 )
     {
         vmcs_revision_id = vmx_msr_low;
         vmx_pin_based_exec_control = _vmx_pin_based_exec_control;
         vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
-        if ( vmx_has_secondary_exec_ctls )
-            vmx_secondary_exec_control = _vmx_secondary_exec_control;
+        vmx_secondary_exec_control = _vmx_secondary_exec_control;
         vmx_vmexit_control = _vmx_vmexit_control;
         vmx_vmentry_control = _vmx_vmentry_control;
     }
@@ -142,8 +142,7 @@ void vmx_init_vmcs_config(void)
         BUG_ON(vmcs_revision_id != vmx_msr_low);
         BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
         BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
-        if ( vmx_has_secondary_exec_ctls )
-            BUG_ON(vmx_secondary_exec_control != _vmx_secondary_exec_control);
+        BUG_ON(vmx_secondary_exec_control != _vmx_secondary_exec_control);
         BUG_ON(vmx_vmexit_control != _vmx_vmexit_control);
         BUG_ON(vmx_vmentry_control != _vmx_vmentry_control);
     }
@@ -318,7 +317,7 @@ static void construct_vmcs(struct vcpu *
     __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
     __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
     v->arch.hvm_vmx.exec_control = vmx_cpu_based_exec_control;
-    if ( vmx_cpu_based_exec_control & ACTIVATE_SECONDARY_CONTROLS )
+    if ( vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
         __vmwrite(SECONDARY_VM_EXEC_CONTROL, vmx_secondary_exec_control);
 
     if ( cpu_has_vmx_msr_bitmap )
@@ -445,24 +444,14 @@ static void construct_vmcs(struct vcpu *
         cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
     __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
-#ifdef __x86_64__
-    /* CR8 based VLAPIC TPR optimization. */
     if ( cpu_has_vmx_tpr_shadow )
     {
-        __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
-                  page_to_maddr(vcpu_vlapic(v)->regs_page));
+        uint64_t virt_page_ma = page_to_maddr(vcpu_vlapic(v)->regs_page);
+        __vmwrite(VIRTUAL_APIC_PAGE_ADDR, virt_page_ma);
+#if defined (__i386__)
+        __vmwrite(VIRTUAL_APIC_PAGE_ADDR_HIGH, virt_page_ma >> 32);
+#endif
         __vmwrite(TPR_THRESHOLD, 0);
-    }
-#endif
-
-    /* Memory-mapped based VLAPIC TPR optimization. */
-    if ( cpu_has_vmx_mmap_vtpr_optimization )
-    {
-        __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
-                  page_to_maddr(vcpu_vlapic(v)->regs_page));
-        __vmwrite(TPR_THRESHOLD, 0);
-
-        vcpu_vlapic(v)->mmap_vtpr_enabled = 1;
     }
 
     __vmwrite(GUEST_LDTR_SELECTOR, 0);
@@ -534,18 +523,6 @@ void vmx_do_resume(struct vcpu *v)
         vmx_load_vmcs(v);
         hvm_migrate_timers(v);
         vmx_set_host_env(v);
-    }
-
-    if ( !v->arch.hvm_vmx.launched && vcpu_vlapic(v)->mmap_vtpr_enabled )
-    {
-        struct page_info *pg = change_guest_physmap_for_vtpr(v->domain, 1);
-
-        if ( pg == NULL )
-        {
-            gdprintk(XENLOG_ERR, "change_guest_physmap_for_vtpr failed!\n");
-            domain_crash_synchronous();
-        }
-        __vmwrite(APIC_ACCESS_ADDR, page_to_maddr(pg));
     }
 
     debug_state = v->domain->debugger_attached;
diff -r 044723b2ab49 -r b14bedd31390 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Dec 27 22:34:52 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Dec 27 22:50:59 2007 +0000
@@ -56,6 +56,20 @@ static void vmx_ctxt_switch_from(struct 
 static void vmx_ctxt_switch_from(struct vcpu *v);
 static void vmx_ctxt_switch_to(struct vcpu *v);
+
+static int vmx_alloc_vlapic_mapping(struct domain *d);
+static void vmx_free_vlapic_mapping(struct domain *d);
+static void vmx_install_vlapic_mapping(struct vcpu *v);
+
+static int vmx_domain_initialise(struct domain *d)
+{
+    return vmx_alloc_vlapic_mapping(d);
+}
+
+static void vmx_domain_destroy(struct domain *d)
+{
+    vmx_free_vlapic_mapping(d);
+}
 
 static int vmx_vcpu_initialise(struct vcpu *v)
 {
     int rc;
@@ -73,6 +87,8 @@ static int vmx_vcpu_initialise(struct vc
                 v->vcpu_id, rc);
         return rc;
     }
+
+    vmx_install_vlapic_mapping(v);
 
     return 0;
 }
@@ -1207,6 +1223,8 @@ static struct hvm_function_table vmx_fun
 static struct hvm_function_table vmx_function_table = {
     .name                 = "VMX",
     .disable              = stop_vmx,
+    .domain_initialise    = vmx_domain_initialise,
+    .domain_destroy       = vmx_domain_destroy,
     .vcpu_initialise      = vmx_vcpu_initialise,
     .vcpu_destroy         = vmx_vcpu_destroy,
     .store_cpu_guest_regs = vmx_store_cpu_guest_regs,
@@ -2590,7 +2608,7 @@ static int vmx_do_msr_read(struct cpu_us
     case MSR_IA32_APICBASE:
         msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
         break;
-    case MSR_IA32_VMX_BASIC_MSR...MSR_IA32_VMX_CR4_FIXED1:
+    case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_CR4_FIXED1:
         goto gp_fault;
     case MSR_IA32_MCG_CAP:
     case MSR_IA32_MCG_STATUS:
@@ -2633,112 +2651,66 @@ gp_fault:
     return 0;
 }
 
-struct page_info * change_guest_physmap_for_vtpr(struct domain *d,
-                                                 int enable_vtpr)
-{
-    struct page_info *pg;
-    unsigned long pfn, mfn;
-
-    spin_lock(&d->arch.hvm_domain.vapic_access_lock);
-
-    pg = d->arch.hvm_domain.apic_access_page;
-    pfn = paddr_to_pfn(APIC_DEFAULT_PHYS_BASE);
-
-    if ( enable_vtpr )
-    {
-        if ( d->arch.hvm_domain.physmap_changed_for_vlapic_access )
-            goto out;
-
-        if ( pg == NULL )
-            pg = alloc_domheap_page(d);
-        if ( pg == NULL )
-        {
-            gdprintk(XENLOG_ERR, "alloc_domheap_pages() failed!\n");
-            goto out;
-        }
-
-        mfn = page_to_mfn(pg);
-        d->arch.hvm_domain.apic_access_page = pg;
-
-        guest_physmap_add_page(d, pfn, mfn);
-
-        d->arch.hvm_domain.physmap_changed_for_vlapic_access = 1;
-
-        goto out;
-    }
-    else
-    {
-        if ( d->arch.hvm_domain.physmap_changed_for_vlapic_access )
-        {
-            mfn = page_to_mfn(pg);
-            guest_physmap_remove_page(d, pfn, mfn);
-            flush_tlb_mask(d->domain_dirty_cpumask);
-
-            d->arch.hvm_domain.physmap_changed_for_vlapic_access = 0;
-        }
-        pg = NULL;
-        goto out;
-    }
-
-out:
-    spin_unlock(&d->arch.hvm_domain.vapic_access_lock);
-    return pg;
-}
-
-static void check_vlapic_msr_for_vtpr(struct vcpu *v)
+static int vmx_alloc_vlapic_mapping(struct domain *d)
+{
+    void *apic_va;
+
+    if ( !cpu_has_vmx_virtualize_apic_accesses )
+        return 0;
+
+    apic_va = alloc_xenheap_page();
+    if ( apic_va == NULL )
+        return -ENOMEM;
+    share_xen_page_with_guest(virt_to_page(apic_va), d, XENSHARE_writable);
+    guest_physmap_add_page(
+        d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), virt_to_mfn(apic_va));
+    d->arch.hvm_domain.vmx_apic_access_mfn = virt_to_mfn(apic_va);
+
+    return 0;
+}
+
+static void vmx_free_vlapic_mapping(struct domain *d)
+{
+    unsigned long mfn = d->arch.hvm_domain.vmx_apic_access_mfn;
+    if ( mfn != 0 )
+        free_xenheap_page(mfn_to_virt(mfn));
+}
+
+static void vmx_install_vlapic_mapping(struct vcpu *v)
+{
+    paddr_t virt_page_ma, apic_page_ma;
+
+    if ( !cpu_has_vmx_virtualize_apic_accesses )
+        return;
+
+    virt_page_ma = page_to_maddr(vcpu_vlapic(v)->regs_page);
+    apic_page_ma = v->domain->arch.hvm_domain.vmx_apic_access_mfn;
+    apic_page_ma <<= PAGE_SHIFT;
+
+    vmx_vmcs_enter(v);
+    __vmwrite(VIRTUAL_APIC_PAGE_ADDR, virt_page_ma);
+    __vmwrite(APIC_ACCESS_ADDR, apic_page_ma);
+#if defined (__i386__)
+    __vmwrite(VIRTUAL_APIC_PAGE_ADDR_HIGH, virt_page_ma >> 32);
+    __vmwrite(APIC_ACCESS_ADDR_HIGH, apic_page_ma >> 32);
+#endif
+    vmx_vmcs_exit(v);
+}
+
+static void vmx_check_vlapic_msr(struct vcpu *v)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
-    int mmap_vtpr_enabled = vcpu_vlapic(v)->mmap_vtpr_enabled;
-    uint32_t tmp;
-
-
-    if ( vlapic_hw_disabled(vlapic) && mmap_vtpr_enabled )
-    {
-        vcpu_vlapic(v)->mmap_vtpr_enabled = 0;
-
-#ifdef __i386__
-        v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_TPR_SHADOW;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
-                  v->arch.hvm_vcpu.u.vmx.exec_control);
-#elif defined(__x86_64__)
-        if ( !cpu_has_vmx_tpr_shadow )
-        {
-            v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_TPR_SHADOW;
-            __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
-                      v->arch.hvm_vcpu.u.vmx.exec_control);
-        }
-#endif
-        tmp  = __vmread(SECONDARY_VM_EXEC_CONTROL);
-        tmp &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-        __vmwrite(SECONDARY_VM_EXEC_CONTROL, tmp);
-
-        change_guest_physmap_for_vtpr(v->domain, 0);
-    }
-    else if ( !vlapic_hw_disabled(vlapic) && !mmap_vtpr_enabled &&
-              cpu_has_vmx_mmap_vtpr_optimization )
-    {
-        vcpu_vlapic(v)->mmap_vtpr_enabled = 1;
-
-        v->arch.hvm_vcpu.u.vmx.exec_control |=
-            ( ACTIVATE_SECONDARY_CONTROLS | CPU_BASED_TPR_SHADOW );
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
-                  v->arch.hvm_vcpu.u.vmx.exec_control);
-        tmp  = __vmread(SECONDARY_VM_EXEC_CONTROL);
-        tmp |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-        __vmwrite(SECONDARY_VM_EXEC_CONTROL, tmp);
-
-        change_guest_physmap_for_vtpr(v->domain, 1);
-    }
-
-    if ( vcpu_vlapic(v)->mmap_vtpr_enabled &&
-         !vlapic_hw_disabled(vlapic) &&
-         (vlapic_base_address(vlapic) != APIC_DEFAULT_PHYS_BASE) )
-    {
-        gdprintk(XENLOG_ERR,
-                 "Local APIC base address is set to 0x%016"PRIx64"!\n",
-                 vlapic_base_address(vlapic));
-        domain_crash_synchronous();
-    }
+    uint32_t ctl;
+
+    if ( !cpu_has_vmx_virtualize_apic_accesses )
+        return;
+
+    ctl  = __vmread(SECONDARY_VM_EXEC_CONTROL);
+    ctl &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+    if ( !vlapic_hw_disabled(vlapic) &&
+         (vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) )
+        ctl |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+    __vmwrite(SECONDARY_VM_EXEC_CONTROL, ctl);
 }
 
 static int vmx_do_msr_write(struct cpu_user_regs *regs)
@@ -2770,9 +2742,9 @@ static int vmx_do_msr_write(struct cpu_u
         break;
     case MSR_IA32_APICBASE:
         vlapic_msr_set(vcpu_vlapic(v), msr_content);
-        check_vlapic_msr_for_vtpr(v);
-        break;
-    case MSR_IA32_VMX_BASIC_MSR...MSR_IA32_VMX_CR4_FIXED1:
+        vmx_check_vlapic_msr(v);
+        break;
+    case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_CR4_FIXED1:
         goto gp_fault;
     default:
         if ( !long_mode_do_msr_write(regs) )
@@ -3104,12 +3076,12 @@ asmlinkage void vmx_vmexit_handler(struc
 
     case EXIT_REASON_TPR_BELOW_THRESHOLD:
         break;
+
     case EXIT_REASON_APIC_ACCESS:
     {
         unsigned long offset;
-        exit_qualification = __vmread(EXIT_QUALIFICATION);
-        offset = exit_qualification & 0x0fffUL;
+        offset = exit_qualification & 0x0fffUL;
         handle_mmio(APIC_DEFAULT_PHYS_BASE | offset);
         break;
     }
 
diff -r 044723b2ab49 -r b14bedd31390 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h	Thu Dec 27 22:34:52 2007 +0000
+++ b/xen/include/asm-x86/hvm/domain.h	Thu Dec 27 22:50:59 2007 +0000
@@ -41,11 +41,6 @@ struct hvm_domain {
     s64                    tsc_frequency;
     struct pl_time         pl_time;
 
-    /* For memory-mapped vLAPIC/vTPR access optimization */
-    spinlock_t             vapic_access_lock;
-    int                    physmap_changed_for_vlapic_access : 1;
-    struct page_info      *apic_access_page;
-
     struct hvm_io_handler  io_handler;
 
     /* Lock protects access to irq, vpic and vioapic. */
@@ -60,6 +55,8 @@ struct hvm_domain {
     spinlock_t             pbuf_lock;
 
     uint64_t               params[HVM_NR_PARAMS];
+
+    unsigned long          vmx_apic_access_mfn;
 };
 
 #endif /* __ASM_X86_HVM_DOMAIN_H__ */
diff -r 044723b2ab49 -r b14bedd31390 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h	Thu Dec 27 22:34:52 2007 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h	Thu Dec 27 22:50:59 2007 +0000
@@ -69,8 +69,10 @@ struct hvm_function_table {
     void (*disable)(void);
 
     /*
-     * Initialise/destroy HVM VCPU resources
-     */
+     * Initialise/destroy HVM domain/vcpu resources
+     */
+    int  (*domain_initialise)(struct domain *d);
+    void (*domain_destroy)(struct domain *d);
     int  (*vcpu_initialise)(struct vcpu *v);
     void (*vcpu_destroy)(struct vcpu *v);
 
diff -r 044723b2ab49 -r b14bedd31390 xen/include/asm-x86/hvm/vlapic.h
--- a/xen/include/asm-x86/hvm/vlapic.h	Thu Dec 27 22:34:52 2007 +0000
+++ b/xen/include/asm-x86/hvm/vlapic.h	Thu Dec 27 22:50:59 2007 +0000
@@ -33,7 +33,7 @@
 #define vlapic_domain(vpic) (vlapic_vcpu(vlapic)->domain)
 
 #define VLAPIC_ID(vlapic)   \
-    (GET_APIC_ID(vlapic_get_reg(vlapic, APIC_ID)))
+    (GET_APIC_ID(vlapic_get_reg((vlapic), APIC_ID)))
 
 /*
  * APIC can be disabled in two ways:
@@ -50,7 +50,7 @@
 #define vlapic_enabled(vlapic)  (!vlapic_disabled(vlapic))
 
 #define vlapic_base_address(vlapic)                     \
-    (vlapic->hw.apic_base_msr & MSR_IA32_APICBASE_BASE)
+    ((vlapic)->hw.apic_base_msr & MSR_IA32_APICBASE_BASE)
 
 struct vlapic {
     struct hvm_hw_lapic      hw;
@@ -58,8 +58,6 @@ struct vlapic {
     struct periodic_time     pt;
     s_time_t                 timer_last_update;
     struct page_info         *regs_page;
-
-    int                      mmap_vtpr_enabled : 1;
 };
 
 static inline uint32_t vlapic_get_reg(struct vlapic *vlapic, uint32_t reg)
diff -r 044723b2ab49 -r b14bedd31390 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Dec 27 22:34:52 2007 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Dec 27 22:50:59 2007 +0000
@@ -94,24 +94,24 @@ void vmx_vmcs_enter(struct vcpu *v);
 void vmx_vmcs_enter(struct vcpu *v);
 void vmx_vmcs_exit(struct vcpu *v);
 
-#define CPU_BASED_VIRTUAL_INTR_PENDING  0x00000004
-#define CPU_BASED_USE_TSC_OFFSETING     0x00000008
-#define CPU_BASED_HLT_EXITING           0x00000080
-#define CPU_BASED_INVLPG_EXITING        0x00000200
-#define CPU_BASED_MWAIT_EXITING         0x00000400
-#define CPU_BASED_RDPMC_EXITING         0x00000800
-#define CPU_BASED_RDTSC_EXITING         0x00001000
-#define CPU_BASED_CR8_LOAD_EXITING      0x00080000
-#define CPU_BASED_CR8_STORE_EXITING     0x00100000
-#define CPU_BASED_TPR_SHADOW            0x00200000
-#define CPU_BASED_VIRTUAL_NMI_PENDING   0x00400000
-#define CPU_BASED_MOV_DR_EXITING        0x00800000
-#define CPU_BASED_UNCOND_IO_EXITING     0x01000000
-#define CPU_BASED_ACTIVATE_IO_BITMAP    0x02000000
-#define CPU_BASED_ACTIVATE_MSR_BITMAP   0x10000000
-#define CPU_BASED_MONITOR_EXITING       0x20000000
-#define CPU_BASED_PAUSE_EXITING         0x40000000
-#define ACTIVATE_SECONDARY_CONTROLS     0x80000000
+#define CPU_BASED_VIRTUAL_INTR_PENDING        0x00000004
+#define CPU_BASED_USE_TSC_OFFSETING           0x00000008
+#define CPU_BASED_HLT_EXITING                 0x00000080
+#define CPU_BASED_INVLPG_EXITING              0x00000200
+#define CPU_BASED_MWAIT_EXITING               0x00000400
+#define CPU_BASED_RDPMC_EXITING               0x00000800
+#define CPU_BASED_RDTSC_EXITING               0x00001000
+#define CPU_BASED_CR8_LOAD_EXITING            0x00080000
+#define CPU_BASED_CR8_STORE_EXITING           0x00100000
+#define CPU_BASED_TPR_SHADOW                  0x00200000
+#define CPU_BASED_VIRTUAL_NMI_PENDING         0x00400000
+#define CPU_BASED_MOV_DR_EXITING              0x00800000
+#define CPU_BASED_UNCOND_IO_EXITING           0x01000000
+#define CPU_BASED_ACTIVATE_IO_BITMAP          0x02000000
+#define CPU_BASED_ACTIVATE_MSR_BITMAP         0x10000000
+#define CPU_BASED_MONITOR_EXITING             0x20000000
+#define CPU_BASED_PAUSE_EXITING               0x40000000
+#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
 extern u32 vmx_cpu_based_exec_control;
 
 #define PIN_BASED_EXT_INTR_MASK         0x00000001
@@ -135,9 +135,6 @@ extern u32 vmx_secondary_exec_control;
     (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
 #define cpu_has_vmx_tpr_shadow \
     (vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
-#define cpu_has_vmx_mmap_vtpr_optimization \
-    (cpu_has_vmx_virtualize_apic_accesses && cpu_has_vmx_tpr_shadow)
-
 #define cpu_has_vmx_msr_bitmap \
     (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
 extern char *vmx_msr_bitmap;
diff -r 044723b2ab49 -r b14bedd31390 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Dec 27 22:34:52 2007 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Dec 27 22:50:59 2007 +0000
@@ -32,9 +32,6 @@ void vmx_intr_assist(void);
 void vmx_intr_assist(void);
 void vmx_do_resume(struct vcpu *);
 void set_guest_time(struct vcpu *v, u64 gtime);
-
-extern struct page_info *change_guest_physmap_for_vtpr(struct domain *d,
-                                                       int enable_vtpr);
 
 /*
  * Exit Reasons
diff -r 044723b2ab49 -r b14bedd31390 xen/include/asm-x86/msr.h
--- a/xen/include/asm-x86/msr.h	Thu Dec 27 22:34:52 2007 +0000
+++ b/xen/include/asm-x86/msr.h	Thu Dec 27 22:50:59 2007 +0000
@@ -110,12 +110,12 @@ static inline void wrmsrl(unsigned int m
 #define MSR_P6_PERFCTR1		0xc2
 
 /* MSRs & bits used for VMX enabling */
-#define MSR_IA32_VMX_BASIC_MSR                  0x480
-#define MSR_IA32_VMX_PINBASED_CTLS_MSR          0x481
-#define MSR_IA32_VMX_PROCBASED_CTLS_MSR         0x482
-#define MSR_IA32_VMX_EXIT_CTLS_MSR              0x483
-#define MSR_IA32_VMX_ENTRY_CTLS_MSR             0x484
-#define MSR_IA32_VMX_MISC_MSR                   0x485
+#define MSR_IA32_VMX_BASIC                      0x480
+#define MSR_IA32_VMX_PINBASED_CTLS              0x481
+#define MSR_IA32_VMX_PROCBASED_CTLS             0x482
+#define MSR_IA32_VMX_EXIT_CTLS                  0x483
+#define MSR_IA32_VMX_ENTRY_CTLS                 0x484
+#define MSR_IA32_VMX_MISC                       0x485
 #define MSR_IA32_VMX_CR0_FIXED0                 0x486
 #define MSR_IA32_VMX_CR0_FIXED1                 0x487
 #define MSR_IA32_VMX_CR4_FIXED0                 0x488

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog