[Xen-changelog] [xen-unstable] nestedhvm: Allocate a separate host ASID for each L2 VCPU.
# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1302858462 -3600
# Node ID 77da97cbddcd81681eea4e6b1b421ea4e5d1513c
# Parent 1329d99b4f161b7617a667f601077cc92559f248
nestedhvm: Allocate a separate host ASID for each L2 VCPU.

This avoids TLB flushing on every L1/L2 transition.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
---

diff -r 1329d99b4f16 -r 77da97cbddcd xen/arch/x86/hvm/asid.c
--- a/xen/arch/x86/hvm/asid.c Fri Apr 15 08:52:08 2011 +0100
+++ b/xen/arch/x86/hvm/asid.c Fri Apr 15 10:07:42 2011 +0100
@@ -78,9 +78,15 @@
     data->next_asid = 1;
 }
 
+void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid)
+{
+    asid->generation = 0;
+}
+
 void hvm_asid_flush_vcpu(struct vcpu *v)
 {
-    v->arch.hvm_vcpu.asid_generation = 0;
+    hvm_asid_flush_vcpu_asid(&v->arch.hvm_vcpu.n1asid);
+    hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
 }
 
 void hvm_asid_flush_core(void)
@@ -102,9 +108,8 @@
     data->disabled = 1;
 }
 
-bool_t hvm_asid_handle_vmenter(void)
+bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid)
 {
-    struct vcpu *curr = current;
     struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
 
     /* On erratum #170 systems we must flush the TLB. 
@@ -113,7 +118,7 @@
         goto disabled;
 
     /* Test if VCPU has valid ASID. */
-    if ( curr->arch.hvm_vcpu.asid_generation == data->core_asid_generation )
+    if ( asid->generation == data->core_asid_generation )
         return 0;
 
     /* If there are no free ASIDs, need to go to a new generation */
@@ -126,17 +131,17 @@
     }
 
     /* Now guaranteed to be a free ASID. */
-    curr->arch.hvm_vcpu.asid = data->next_asid++;
-    curr->arch.hvm_vcpu.asid_generation = data->core_asid_generation;
+    asid->asid = data->next_asid++;
+    asid->generation = data->core_asid_generation;
 
     /*
      * When we assign ASID 1, flush all TLB entries as we are starting a new
      * generation, and all old ASID allocations are now stale.
      */
-    return (curr->arch.hvm_vcpu.asid == 1);
+    return (asid->asid == 1);
 
 disabled:
-    curr->arch.hvm_vcpu.asid = 0;
+    asid->asid = 0;
     return 0;
 }
 
diff -r 1329d99b4f16 -r 77da97cbddcd xen/arch/x86/hvm/svm/asid.c
--- a/xen/arch/x86/hvm/svm/asid.c Fri Apr 15 08:52:08 2011 +0100
+++ b/xen/arch/x86/hvm/svm/asid.c Fri Apr 15 10:07:42 2011 +0100
@@ -22,6 +22,7 @@
 #include <xen/perfc.h>
 #include <asm/hvm/svm/asid.h>
 #include <asm/amd.h>
+#include <asm/hvm/nestedhvm.h>
 
 void svm_asid_init(struct cpuinfo_x86 *c)
 {
@@ -42,17 +43,20 @@
 {
     struct vcpu *curr = current;
     struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
-    bool_t need_flush = hvm_asid_handle_vmenter();
+    struct hvm_vcpu_asid *p_asid =
+        nestedhvm_vcpu_in_guestmode(curr)
+        ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm_vcpu.n1asid;
+    bool_t need_flush = hvm_asid_handle_vmenter(p_asid);
 
     /* ASID 0 indicates that ASIDs are disabled. */
-    if ( curr->arch.hvm_vcpu.asid == 0 )
+    if ( p_asid->asid == 0 )
     {
         vmcb_set_guest_asid(vmcb, 1);
         vmcb->tlb_control = 1;
         return;
     }
 
-    vmcb_set_guest_asid(vmcb, curr->arch.hvm_vcpu.asid);
+    vmcb_set_guest_asid(vmcb, p_asid->asid);
     vmcb->tlb_control = need_flush;
 }
 
diff -r 1329d99b4f16 -r 77da97cbddcd xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c Fri Apr 15 08:52:08 2011 +0100
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c Fri Apr 15 10:07:42 2011 +0100
@@ -261,8 +261,6 @@
     /* Cleanbits */
     n1vmcb->cleanbits.bytes = 0;
 
-    hvm_asid_flush_vcpu(v);
-
     return 0;
 }
 
@@ -408,9 +406,7 @@
     if (rc)
         return rc;
 
-    /* ASID */
-    hvm_asid_flush_vcpu(v);
-    /* n2vmcb->_guest_asid = ns_vmcb->_guest_asid; */
+    /* ASID - Emulation handled in hvm_asid_handle_vmenter() */
 
     /* TLB control */
     n2vmcb->tlb_control = n1vmcb->tlb_control | ns_vmcb->tlb_control;
@@ -605,9 +601,13 @@
     svm->ns_vmcb_guestcr3 = ns_vmcb->_cr3;
     svm->ns_vmcb_hostcr3 = ns_vmcb->_h_cr3;
 
-    nv->nv_flushp2m = (ns_vmcb->tlb_control
-        || (svm->ns_guest_asid != ns_vmcb->_guest_asid));
-    svm->ns_guest_asid = ns_vmcb->_guest_asid;
+    nv->nv_flushp2m = ns_vmcb->tlb_control;
+    if ( svm->ns_guest_asid != ns_vmcb->_guest_asid )
+    {
+        nv->nv_flushp2m = 1;
+        hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
+        svm->ns_guest_asid = ns_vmcb->_guest_asid;
+    }
 
     /* nested paging for the guest */
     svm->ns_hap_enabled = (ns_vmcb->_np_enable) ? 1 : 0;
 
diff -r 1329d99b4f16 -r 77da97cbddcd xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Fri Apr 15 08:52:08 2011 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Fri Apr 15 10:07:42 2011 +0100
@@ -1580,6 +1580,15 @@
     __update_guest_eip(regs, inst_len);
 }
 
+static void svm_invlpga_intercept(
+    struct vcpu *v, unsigned long vaddr, uint32_t asid)
+{
+    svm_invlpga(vaddr,
+                (asid == 0)
+                ? v->arch.hvm_vcpu.n1asid.asid
+                : vcpu_nestedhvm(v).nv_n2asid.asid);
+}
+
 static void svm_invlpg_intercept(unsigned long vaddr)
 {
     struct vcpu *curr = current;
@@ -1894,11 +1903,14 @@
     case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
     case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
     case VMEXIT_INVLPG:
-    case VMEXIT_INVLPGA:
         if ( !handle_mmio() )
             hvm_inject_exception(TRAP_gp_fault, 0, 0);
         break;
 
+    case VMEXIT_INVLPGA:
+        svm_invlpga_intercept(v, regs->rax, regs->ecx);
+        break;
+
     case VMEXIT_VMMCALL:
         if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
             break;
 
diff -r 1329d99b4f16 -r 77da97cbddcd xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Fri Apr 15 08:52:08 2011 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Fri Apr 15 10:07:42 2011 +0100
@@ -867,9 +867,6 @@
 #endif
     }
 
-    if ( cpu_has_vmx_vpid )
-        __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vcpu.asid);
-
     if ( cpu_has_vmx_pat && paging_mode_hap(d) )
     {
         u64 host_pat, guest_pat;
 
diff -r 1329d99b4f16 -r 77da97cbddcd xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Fri Apr 15 08:52:08 2011 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Fri Apr 15 10:07:42 2011 +0100
@@ -2667,14 +2667,16 @@
 {
     struct vcpu *curr = current;
     u32 new_asid, old_asid;
+    struct hvm_vcpu_asid *p_asid;
     bool_t need_flush;
 
     if ( !cpu_has_vmx_vpid )
         goto out;
 
-    old_asid = curr->arch.hvm_vcpu.asid;
-    need_flush = hvm_asid_handle_vmenter();
-    new_asid = curr->arch.hvm_vcpu.asid;
+    p_asid = &curr->arch.hvm_vcpu.n1asid;
+    old_asid = p_asid->asid;
+    need_flush = hvm_asid_handle_vmenter(p_asid);
+    new_asid = p_asid->asid;
 
     if ( unlikely(new_asid != old_asid) )
     {
 
diff -r 1329d99b4f16 -r 77da97cbddcd xen/include/asm-x86/hvm/asid.h
--- a/xen/include/asm-x86/hvm/asid.h Fri Apr 15 08:52:08 2011 +0100
+++ b/xen/include/asm-x86/hvm/asid.h Fri Apr 15 10:07:42 2011 +0100
@@ -23,11 +23,15 @@
 #include <xen/config.h>
 
 struct vcpu;
+struct hvm_vcpu_asid;
 
 /* Initialise ASID management for the current physical CPU. */
 void hvm_asid_init(int nasids);
 
-/* Invalidate a VCPU's current ASID allocation: forces re-allocation. */
+/* Invalidate a particular ASID allocation: forces re-allocation. */
+void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid);
+
+/* Invalidate all ASID allocations for specified VCPU: forces re-allocation. */
 void hvm_asid_flush_vcpu(struct vcpu *v);
 
 /* Flush all ASIDs on this processor core. */
@@ -35,7 +39,7 @@
 
 /* Called before entry to guest context. Checks ASID allocation, returns a
  * boolean indicating whether all ASIDs must be flushed. */
-bool_t hvm_asid_handle_vmenter(void);
+bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid);
 
 #endif /* __ASM_X86_HVM_ASID_H__ */
 
diff -r 1329d99b4f16 -r 77da97cbddcd xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h Fri Apr 15 08:52:08 2011 +0100
+++ b/xen/include/asm-x86/hvm/vcpu.h Fri Apr 15 10:07:42 2011 +0100
@@ -37,6 +37,11 @@
     HVMIO_completed
 };
 
+struct hvm_vcpu_asid {
+    uint64_t generation;
+    uint32_t asid;
+};
+
 #define VMCX_EADDR (~0ULL)
 
 struct nestedvcpu {
@@ -57,6 +62,8 @@
     bool_t nv_flushp2m; /* True, when p2m table must be flushed */
     struct p2m_domain *nv_p2m; /* used p2m table for this vcpu */
 
+    struct hvm_vcpu_asid nv_n2asid;
+
     bool_t nv_vmentry_pending;
     bool_t nv_vmexit_pending;
     bool_t nv_vmswitch_in_progress; /* true during vmentry/vmexit emulation */
@@ -100,8 +107,7 @@
     bool_t hcall_preempted;
     bool_t hcall_64bit;
 
-    uint64_t asid_generation;
-    uint32_t asid;
+    struct hvm_vcpu_asid n1asid;
 
     u32 msr_tsc_aux;
 
diff -r 1329d99b4f16 -r 77da97cbddcd xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Fri Apr 15 08:52:08 2011 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Fri Apr 15 10:07:42 2011 +0100
@@ -377,7 +377,7 @@
         type = INVVPID_ALL_CONTEXT;
 
 execute_invvpid:
-    __invvpid(type, v->arch.hvm_vcpu.asid, (u64)gva);
+    __invvpid(type, v->arch.hvm_vcpu.n1asid.asid, (u64)gva);
 }
 
 static inline void vpid_sync_all(void)

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
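For readers skimming the change, the following standalone sketch (not part of the patch) models the idea: each virtual CPU now carries two independently managed ASIDs, one for the L1 context (n1asid) and one for the L2 context (nv_n2asid), and the per-core generation check that the patch moves into hvm_asid_handle_vmenter() decides when a TLB flush is actually required. The structures and the main() driver below are simplified stand-ins, not the Xen definitions.

/*
 * Standalone model of generation-based ASID allocation, simplified from the
 * logic this patch routes through hvm_asid_handle_vmenter().  Types and the
 * driver are illustrative stand-ins, not the Xen code.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct vcpu_asid {
    uint64_t generation;   /* generation in which this ASID was allocated */
    uint32_t asid;         /* 0 means "no ASID assigned yet" */
};

struct core_asid_state {
    uint64_t generation;   /* bumped whenever the ASID space is recycled */
    uint32_t next_asid;    /* next free ASID in the current generation */
    uint32_t max_asid;     /* number of ASIDs the hardware supports */
};

/* Returns true if the caller must flush the TLB before entering the guest. */
static bool asid_handle_vmenter(struct core_asid_state *core,
                                struct vcpu_asid *asid)
{
    /* Still valid in the current generation: keep the existing ASID. */
    if (asid->generation == core->generation)
        return false;

    /* Out of ASIDs: start a new generation, invalidating every old ASID. */
    if (core->next_asid > core->max_asid) {
        core->generation++;
        core->next_asid = 1;
    }

    asid->asid = core->next_asid++;
    asid->generation = core->generation;

    /* ASID 1 marks the start of a generation, so flush stale entries once. */
    return asid->asid == 1;
}

int main(void)
{
    struct core_asid_state core = { .generation = 1, .next_asid = 1,
                                    .max_asid = 4 };
    /* One ASID for the L1 context and one for the L2 context of a vCPU. */
    struct vcpu_asid n1 = { 0 }, n2 = { 0 };

    /* Alternating L1/L2 entries reuse their own ASIDs: no repeated flushes. */
    printf("L1 entry: flush=%d asid=%u\n", asid_handle_vmenter(&core, &n1), n1.asid);
    printf("L2 entry: flush=%d asid=%u\n", asid_handle_vmenter(&core, &n2), n2.asid);
    printf("L1 entry: flush=%d asid=%u\n", asid_handle_vmenter(&core, &n1), n1.asid);
    printf("L2 entry: flush=%d asid=%u\n", asid_handle_vmenter(&core, &n2), n2.asid);
    return 0;
}

In this model only the very first allocation of a generation reports a flush; subsequent L1/L2 switches each find their own still-valid ASID, which is the behaviour the changeset description refers to when it says the separate L2 ASID avoids flushing on every L1/L2 transition.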