[Xen-changelog] [xen-unstable] vmx: wbinvd optimization for pass-through domain.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1195230998 0
# Node ID e82fb0729b511446892a7be44031d06a0a640283
# Parent  ef4b60c99735c883394270dcf4acd09633fac01c
vmx: wbinvd optimization for pass-through domain.

Optimise wbinvd exit emulation for pass-through domains to avoid
"always wbinvd" when a VCPU is migrated. Instead, do host wbinvd on
all host CPUs when wbinvd exit.

Signed-off-by Yaozu (Eddie) Dong <eddie.dong@xxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmcs.c        |   19 ++++++++++++-------
 xen/arch/x86/hvm/vmx/vmx.c         |   28 ++++++++++++++++++++--------
 xen/include/asm-x86/hvm/vmx/vmcs.h |    2 ++
 3 files changed, 34 insertions(+), 15 deletions(-)

diff -r ef4b60c99735 -r e82fb0729b51 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Fri Nov 16 16:22:00 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Fri Nov 16 16:36:38 2007 +0000
@@ -763,7 +763,7 @@ void vm_resume_fail(unsigned long eflags
     domain_crash_synchronous();
 }
 
-static void flush_cache(void *info)
+static void wbinvd_ipi(void *info)
 {
     wbinvd();
 }
@@ -779,16 +779,21 @@ void vmx_do_resume(struct vcpu *v)
     }
     else
     {
-        /* For pass-through domain, guest PCI-E device driver may leverage the
-         * "Non-Snoop" I/O, and explicitly "WBINVD" or "CFLUSH" to a RAM space.
-         * In that case, if migration occurs before "WBINVD" or "CFLUSH", need
-         * to maintain data consistency.
+        /*
+         * For pass-through domain, guest PCI-E device driver may leverage the
+         * "Non-Snoop" I/O, and explicitly WBINVD or CLFLUSH to a RAM space.
+         * Since migration may occur before WBINVD or CLFLUSH, we need to
+         * maintain data consistency either by:
+         * 1: flushing cache (wbinvd) when the guest is scheduled out if
+         *    there is no wbinvd exit, or
+         * 2: execute wbinvd on all dirty pCPUs when guest wbinvd exits.
          */
-        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) )
+        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) &&
+             !cpu_has_wbinvd_exiting )
         {
             int cpu = v->arch.hvm_vmx.active_cpu;
             if ( cpu != -1 )
-                on_selected_cpus(cpumask_of_cpu(cpu), flush_cache, NULL, 1, 1);
+                on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1);
         }
 
         vmx_clear_vmcs(v);
diff -r ef4b60c99735 -r e82fb0729b51 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Fri Nov 16 16:22:00 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Nov 16 16:36:38 2007 +0000
@@ -2638,6 +2638,11 @@ static void vmx_do_extint(struct cpu_use
     }
 }
 
+static void wbinvd_ipi(void *info)
+{
+    wbinvd();
+}
+
 static void vmx_failed_vmentry(unsigned int exit_reason,
                                struct cpu_user_regs *regs)
 {
@@ -2913,14 +2918,21 @@ asmlinkage void vmx_vmexit_handler(struc
         __update_guest_eip(inst_len);
         if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) )
         {
-            wbinvd();
-            /* Disable further WBINVD intercepts. */
-            if ( (exit_reason == EXIT_REASON_WBINVD) &&
-                 (vmx_cpu_based_exec_control &
-                  CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) )
-                __vmwrite(SECONDARY_VM_EXEC_CONTROL,
-                          vmx_secondary_exec_control &
-                          ~SECONDARY_EXEC_WBINVD_EXITING);
+            if ( cpu_has_wbinvd_exiting )
+            {
+                on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+            }
+            else
+            {
+                wbinvd();
+                /* Disable further WBINVD intercepts. */
+                if ( (exit_reason == EXIT_REASON_WBINVD) &&
+                     (vmx_cpu_based_exec_control &
+                      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) )
+                    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+                              vmx_secondary_exec_control &
+                              ~SECONDARY_EXEC_WBINVD_EXITING);
+            }
         }
         break;
     }
diff -r ef4b60c99735 -r e82fb0729b51 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Fri Nov 16 16:22:00 2007 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Fri Nov 16 16:36:38 2007 +0000
@@ -136,6 +136,8 @@ extern u32 vmx_secondary_exec_control;
 
 extern bool_t cpu_has_vmx_ins_outs_instr_info;
 
+#define cpu_has_wbinvd_exiting \
+    (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING)
 #define cpu_has_vmx_virtualize_apic_accesses \
     (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
 #define cpu_has_vmx_tpr_shadow \
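
For readers skimming the change, the hunks above boil down to one policy choice,
sketched below as a minimal, self-contained C program. It is illustrative only:
wbinvd_on_cpu() and wbinvd_on_all_cpus() are hypothetical stand-ins for Xen's
on_selected_cpus()/on_each_cpu() IPI machinery, since a real WBINVD is a
privileged instruction and is only printed here.

/*
 * Illustrative sketch only -- not part of the patch.  The helpers are
 * hypothetical stand-ins for Xen's IPI primitives.
 */
#include <stdbool.h>
#include <stdio.h>

static void wbinvd_on_cpu(int cpu)      /* stand-in for on_selected_cpus() */
{
    printf("WBINVD on pCPU %d\n", cpu);
}

static void wbinvd_on_all_cpus(void)    /* stand-in for on_each_cpu() */
{
    printf("WBINVD on every pCPU\n");
}

/* A pass-through domain's vCPU is descheduled / about to migrate. */
static void on_vcpu_deschedule(bool has_wbinvd_exiting, int active_cpu)
{
    /* Case 1 (no WBINVD exiting): flush the pCPU the vCPU last ran on. */
    if ( !has_wbinvd_exiting && active_cpu != -1 )
        wbinvd_on_cpu(active_cpu);
    /* Case 2 (WBINVD exiting): nothing to do here; see the exit handler. */
}

/* The guest executed WBINVD and the hypervisor intercepted it. */
static void on_wbinvd_exit(bool has_wbinvd_exiting)
{
    if ( has_wbinvd_exiting )
        wbinvd_on_all_cpus();   /* Case 2: broadcast the flush once. */
    else
        wbinvd_on_cpu(0);       /* Flush locally; the real handler also
                                   disables further WBINVD intercepts. */
}

int main(void)
{
    on_vcpu_deschedule(false, 3);   /* old behaviour: flush on migration */
    on_wbinvd_exit(true);           /* new behaviour: broadcast on exit  */
    return 0;
}

The gain is that, when the hardware supports WBINVD exiting, the per-migration
flush in vmx_do_resume() is skipped entirely and the much rarer guest WBINVD is
turned into a single flush of all host CPUs.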