[Xen-changelog] [xen-unstable] [HVM][SVM] flush all entries from guest ASIDs when xen writes CR3.
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1182333337 -3600
# Node ID 739d698986e9acd6ca5cf0be831a515f93c9d5bc
# Parent  fb5077ecf9a440ab4fa128788172ce604c4b28f2
[HVM][SVM] flush all entries from guest ASIDs when xen writes CR3.

This makes the assumptions about TLB flush behaviour in the page-type
system and the shadow code safe again, and fixes a corner case of NPT
log-dirty.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
 xen/arch/x86/flushtlb.c            |    4 ++
 xen/arch/x86/hvm/svm/asid.c        |   72 ++++++++++++++++++-------------------
 xen/arch/x86/hvm/svm/svm.c         |    9 ++++
 xen/arch/x86/hvm/vmx/vmx.c         |    7 +++
 xen/include/asm-x86/hvm/hvm.h      |   15 +++++++
 xen/include/asm-x86/hvm/support.h  |    1 
 xen/include/asm-x86/hvm/svm/asid.h |    1 
 7 files changed, 72 insertions(+), 37 deletions(-)

diff -r fb5077ecf9a4 -r 739d698986e9 xen/arch/x86/flushtlb.c
--- a/xen/arch/x86/flushtlb.c	Tue Jun 19 18:07:53 2007 +0100
+++ b/xen/arch/x86/flushtlb.c	Wed Jun 20 10:55:37 2007 +0100
@@ -80,6 +80,8 @@ void write_cr3(unsigned long cr3)
 
     t = pre_flush();
 
+    hvm_flush_guest_tlbs();
+
 #ifdef USER_MAPPINGS_ARE_GLOBAL
     __pge_off();
     __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
@@ -103,6 +105,8 @@ void local_flush_tlb(void)
 
     t = pre_flush();
 
+    hvm_flush_guest_tlbs();
+
 #ifdef USER_MAPPINGS_ARE_GLOBAL
     __pge_off();
     __pge_on();
diff -r fb5077ecf9a4 -r 739d698986e9 xen/arch/x86/hvm/svm/asid.c
--- a/xen/arch/x86/hvm/svm/asid.c	Tue Jun 19 18:07:53 2007 +0100
+++ b/xen/arch/x86/hvm/svm/asid.c	Wed Jun 20 10:55:37 2007 +0100
@@ -60,7 +60,7 @@ struct svm_asid_data {
     u64 core_asid_generation;
     u32 next_asid;
     u32 max_asid;
-    u32 erratum170;
+    u32 erratum170:1;
 };
 
 static DEFINE_PER_CPU(struct svm_asid_data, svm_asid_data);
@@ -140,25 +140,21 @@ void svm_asid_init_vcpu(struct vcpu *v)
 }
 
 /*
- * Increase the Generation to make free ASIDs. Flush physical TLB and give
- * ASID.
- */
-static void svm_asid_handle_inc_generation(struct vcpu *v)
-{
-    struct svm_asid_data *data = svm_asid_core_data();
-
-    if ( likely(data->core_asid_generation < SVM_ASID_LAST_GENERATION) )
-    {
-        /* Handle ASID overflow. */
+ * Increase the Generation to make free ASIDs, and indirectly cause a
+ * TLB flush of all ASIDs on the next vmrun.
+ */
+void svm_asid_inc_generation(void)
+{
+    struct svm_asid_data *data = svm_asid_core_data();
+
+    if ( likely(data->core_asid_generation < SVM_ASID_LAST_GENERATION) )
+    {
+        /* Move to the next generation.  We can't flush the TLB now
+         * because you need to vmrun to do that, and current might not
+         * be a HVM vcpu, but the first HVM vcpu that runs after this
+         * will pick up ASID 1 and flush the TLBs. */
         data->core_asid_generation++;
-        data->next_asid = SVM_ASID_FIRST_GUEST_ASID + 1;
-
-        /* Handle VCPU. */
-        v->arch.hvm_svm.vmcb->guest_asid = SVM_ASID_FIRST_GUEST_ASID;
-        v->arch.hvm_svm.asid_generation = data->core_asid_generation;
-
-        /* Trigger flush of physical TLB. */
-        v->arch.hvm_svm.vmcb->tlb_control = 1;
+        data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
         return;
     }
 
@@ -168,11 +164,12 @@ static void svm_asid_handle_inc_generati
      * this core (flushing TLB always). So correctness is established; it
      * only runs a bit slower.
      */
-    printk("AMD SVM: ASID generation overrun. Disabling ASIDs.\n");
-    data->erratum170 = 1;
-    data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
-
-    svm_asid_init_vcpu(v);
+    if ( !data->erratum170 )
+    {
+        printk("AMD SVM: ASID generation overrun. Disabling ASIDs.\n");
+        data->erratum170 = 1;
+        data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
+    }
 }
 
 /*
@@ -202,18 +199,21 @@ asmlinkage void svm_asid_handle_vmrun(vo
         return;
     }
 
-    /* Different ASID generations trigger fetching of a fresh ASID. */
-    if ( likely(data->next_asid <= data->max_asid) )
-    {
-        /* There is a free ASID. */
-        v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
-        v->arch.hvm_svm.asid_generation = data->core_asid_generation;
-        v->arch.hvm_svm.vmcb->tlb_control = 0;
-        return;
-    }
-
-    /* Slow path, may cause TLB flush. */
-    svm_asid_handle_inc_generation(v);
+    /* If there are no free ASIDs, need to go to a new generation. */
+    if ( unlikely(data->next_asid > data->max_asid) )
+        svm_asid_inc_generation();
+
+    /* Now guaranteed to be a free ASID. */
+    v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
+    v->arch.hvm_svm.asid_generation = data->core_asid_generation;
+
+    /* When we assign ASID 1, flush all TLB entries.  We need to do it
+     * here because svm_asid_inc_generation() can be called at any time,
+     * but the TLB flush can only happen on vmrun. */
+    if ( v->arch.hvm_svm.vmcb->guest_asid == SVM_ASID_FIRST_GUEST_ASID )
+        v->arch.hvm_svm.vmcb->tlb_control = 1;
+    else
+        v->arch.hvm_svm.vmcb->tlb_control = 0;
 }
 
 void svm_asid_inv_asid(struct vcpu *v)
diff -r fb5077ecf9a4 -r 739d698986e9 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Tue Jun 19 18:07:53 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Wed Jun 20 10:55:37 2007 +0100
@@ -598,6 +598,14 @@ static void svm_update_guest_cr3(struct 
     v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
 }
 
+static void svm_flush_guest_tlbs(void)
+{
+    /* Roll over the CPU's ASID generation, so it gets a clean TLB when we
+     * next VMRUN.  (If ASIDs are disabled, the whole TLB is flushed on
+     * VMRUN anyway). */
+    svm_asid_inc_generation();
+}
+
 static void svm_update_vtpr(struct vcpu *v, unsigned long value)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -948,6 +956,7 @@ static struct hvm_function_table svm_fun
     .get_segment_register = svm_get_segment_register,
     .update_host_cr3      = svm_update_host_cr3,
     .update_guest_cr3     = svm_update_guest_cr3,
+    .flush_guest_tlbs     = svm_flush_guest_tlbs,
    .update_vtpr          = svm_update_vtpr,
     .stts                 = svm_stts,
     .set_tsc_offset       = svm_set_tsc_offset,
diff -r fb5077ecf9a4 -r 739d698986e9 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Jun 19 18:07:53 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Jun 20 10:55:37 2007 +0100
@@ -1138,6 +1138,12 @@ static void vmx_update_guest_cr3(struct 
     vmx_vmcs_exit(v);
 }
 
+static void vmx_flush_guest_tlbs(void)
+{
+    /* No tagged TLB support on VMX yet.  The fact that we're in Xen
+     * at all means any guest will have a clean TLB when it's next run,
+     * because VMRESUME will flush it for us. */
+}
 static void vmx_inject_exception(
     unsigned int trapnr, int errcode, unsigned long cr2)
 {
@@ -1205,6 +1211,7 @@ static struct hvm_function_table vmx_fun
     .get_segment_register = vmx_get_segment_register,
     .update_host_cr3      = vmx_update_host_cr3,
     .update_guest_cr3     = vmx_update_guest_cr3,
+    .flush_guest_tlbs     = vmx_flush_guest_tlbs,
     .update_vtpr          = vmx_update_vtpr,
     .stts                 = vmx_stts,
     .set_tsc_offset       = vmx_set_tsc_offset,
diff -r fb5077ecf9a4 -r 739d698986e9 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h	Tue Jun 19 18:07:53 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h	Wed Jun 20 10:55:37 2007 +0100
@@ -124,6 +124,13 @@ struct hvm_function_table {
     void (*update_guest_cr3)(struct vcpu *v);
 
     /*
+     * Called to ensure that all guest-specific mappings in a tagged TLB
+     * are flushed; does *not* flush Xen's TLB entries, and on
+     * processors without a tagged TLB it will be a noop.
+     */
+    void (*flush_guest_tlbs)(void);
+
+    /*
      * Reflect the virtual APIC's value in the guest's V_TPR register
      */
     void (*update_vtpr)(struct vcpu *v, unsigned long value);
@@ -148,6 +155,7 @@ struct hvm_function_table {
 };
 
 extern struct hvm_function_table hvm_funcs;
+extern int hvm_enabled;
 
 int hvm_domain_initialise(struct domain *d);
 void hvm_domain_relinquish_resources(struct domain *d);
@@ -230,6 +238,13 @@ hvm_update_vtpr(struct vcpu *v, unsigned
 }
 
 void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);
+
+static inline void
+hvm_flush_guest_tlbs(void)
+{
+    if ( hvm_enabled )
+        hvm_funcs.flush_guest_tlbs();
+}
 
 void hvm_hypercall_page_initialise(struct domain *d,
                                    void *hypercall_page);
diff -r fb5077ecf9a4 -r 739d698986e9 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h	Tue Jun 19 18:07:53 2007 +0100
+++ b/xen/include/asm-x86/hvm/support.h	Wed Jun 20 10:55:37 2007 +0100
@@ -215,7 +215,6 @@ int hvm_load(struct domain *d, hvm_domai
 /* End of save/restore */
 
 extern char hvm_io_bitmap[];
-extern int hvm_enabled;
 
 void hvm_enable(struct hvm_function_table *);
 void hvm_disable(void);
diff -r fb5077ecf9a4 -r 739d698986e9 xen/include/asm-x86/hvm/svm/asid.h
--- a/xen/include/asm-x86/hvm/svm/asid.h	Tue Jun 19 18:07:53 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/asid.h	Wed Jun 20 10:55:37 2007 +0100
@@ -30,6 +30,7 @@ void svm_asid_init(struct cpuinfo_x86 *c
 void svm_asid_init(struct cpuinfo_x86 *c);
 void svm_asid_init_vcpu(struct vcpu *v);
 void svm_asid_inv_asid(struct vcpu *v);
+void svm_asid_inc_generation(void);
 
 /*
  * ASID related, guest triggered events.
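For readers tracing the ASID logic in the patch above: the scheme is lazy
invalidation. svm_asid_inc_generation() only bumps a per-core counter, which
makes every outstanding ASID stale; the hardware flush itself is deferred to
the next vmrun, where svm_asid_handle_vmrun() sees the stale generation, hands
out a fresh ASID, and sets tlb_control exactly when that ASID is the first of
the new generation. Below is a minimal, self-contained C model of that flow --
not the Xen code itself; the constant values, struct layout, and helper names
are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants; the real first ASID is SVM_ASID_FIRST_GUEST_ASID
 * and the real limit comes from CPUID. */
#define FIRST_GUEST_ASID 1u
#define MAX_ASID         8u   /* tiny on purpose, to show rollover */

/* Per-core state, modelled on struct svm_asid_data. */
struct asid_core {
    uint64_t generation;  /* bumping this invalidates all handed-out ASIDs */
    uint32_t next_asid;
};

/* Per-vcpu state: the ASID in the VMCB and the generation it came from. */
struct vcpu_asid {
    uint32_t asid;
    uint64_t generation;
};

/* Lazy invalidation: no TLB work happens here, because only vmrun can
 * flush; the flush is deferred to the next ASID-1 assignment. */
static void asid_inc_generation(struct asid_core *core)
{
    core->generation++;
    core->next_asid = FIRST_GUEST_ASID;
}

/* Called on every vmrun; returns 1 if tlb_control must be set. */
static int asid_handle_vmrun(struct asid_core *core, struct vcpu_asid *v)
{
    if (v->generation == core->generation)
        return 0;                       /* ASID still valid: no flush */

    if (core->next_asid > MAX_ASID)     /* out of ASIDs: new generation */
        asid_inc_generation(core);

    v->asid = core->next_asid++;
    v->generation = core->generation;

    /* Handing out the generation's first ASID means translations from
     * every previous generation may still be in the TLB: flush them. */
    return v->asid == FIRST_GUEST_ASID;
}

int main(void)
{
    /* Core starts at generation 1 so brand-new vcpus (generation 0)
     * always miss on their first run. */
    struct asid_core core = { .generation = 1, .next_asid = FIRST_GUEST_ASID };
    struct vcpu_asid v0 = { 0, 0 }, v1 = { 0, 0 };

    printf("v0 first run:  flush=%d asid=%u\n",
           asid_handle_vmrun(&core, &v0), v0.asid);
    printf("v1 first run:  flush=%d asid=%u\n",
           asid_handle_vmrun(&core, &v1), v1.asid);

    asid_inc_generation(&core);         /* e.g. Xen wrote CR3 */

    printf("v0 after bump: flush=%d asid=%u\n",
           asid_handle_vmrun(&core, &v0), v0.asid);
    printf("v1 after bump: flush=%d asid=%u\n",
           asid_handle_vmrun(&core, &v1), v1.asid);
    return 0;
}

Running this shows one full TLB flush per generation rollover (when ASID 1 is
assigned), however many vcpus pick up fresh ASIDs afterwards.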
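One more detail worth noting: write_cr3() and local_flush_tlb() run very early
in boot and on CPUs with no HVM support at all, which is why the patch moves
the hvm_enabled declaration from support.h into hvm.h and guards the indirect
call inside the new hvm_flush_guest_tlbs() inline instead of calling
hvm_funcs.flush_guest_tlbs() unconditionally. A sketch of that guard pattern,
with simplified stand-in types and names (not the Xen definitions):

#include <stdio.h>

/* Stand-in for struct hvm_function_table: one hook is enough here. */
struct hvm_table {
    void (*flush_guest_tlbs)(void);
};

static struct hvm_table hvm_funcs;  /* all-NULL until a backend registers */
static int hvm_enabled;             /* 0 at boot, set by hvm_enable() */

static void svm_flush_guest_tlbs(void)
{
    puts("SVM backend: bump ASID generation");
}

static void hvm_enable(struct hvm_table *table)
{
    hvm_funcs = *table;
    hvm_enabled = 1;
}

/* What write_cr3()/local_flush_tlb() call on every Xen TLB flush: the
 * guard makes it a safe no-op before HVM setup (or without HVM). */
static void hvm_flush_guest_tlbs(void)
{
    if (hvm_enabled)
        hvm_funcs.flush_guest_tlbs();
}

int main(void)
{
    hvm_flush_guest_tlbs();   /* early boot: no-op, no NULL call */

    struct hvm_table svm = { .flush_guest_tlbs = svm_flush_guest_tlbs };
    hvm_enable(&svm);

    hvm_flush_guest_tlbs();   /* now dispatches to the backend */
    return 0;
}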