[Xen-changelog] [xen-4.1-testing] Fix save/restore of guest PAT table in HAP paging mode.
# HG changeset patch
# User Gianluca Guida <gianluca.guida@xxxxxxxxxx>
# Date 1346762536 -7200
# Node ID 201895ed11bcad4c74a2bb953f93e222c8f50abe
# Parent  e72c098aa73c2689f74167ff853371a184d30058
Fix save/restore of guest PAT table in HAP paging mode.

HAP paging mode guests use direct MSR reads/writes into the VMCS/VMCB
for the guest PAT table, while the current save/restore code was
accessing only the pat_cr field in hvm_vcpu, which is used when
intercepting the MSR, mostly in shadow mode (the Intel scenario is a
bit more complicated).  This patch fixes the issue by creating a new
pair of hvm_funcs, get/set_guest_pat, that access the right PAT table
based on the paging mode and guest configuration.

Signed-off-by: Gianluca Guida <gianluca.guida@xxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
xen-unstable changeset: 25196:375fa55c7a6c
xen-unstable date: Tue Apr 17 07:29:26 UTC 2012
---

diff -r e72c098aa73c -r 201895ed11bc xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Tue Sep 04 14:40:53 2012 +0200
+++ b/xen/arch/x86/hvm/hvm.c Tue Sep 04 14:42:16 2012 +0200
@@ -217,6 +217,31 @@ void hvm_set_rdtsc_exiting(struct domain
     hvm_funcs.set_rdtsc_exiting(v, enable);
 }

+void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat)
+{
+    if ( !hvm_funcs.get_guest_pat(v, guest_pat) )
+        *guest_pat = v->arch.hvm_vcpu.pat_cr;
+}
+
+int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat)
+{
+    int i;
+    uint8_t *value = (uint8_t *)&guest_pat;
+
+    for ( i = 0; i < 8; i++ )
+        if ( unlikely(!(value[i] == 0 || value[i] == 1 ||
+                        value[i] == 4 || value[i] == 5 ||
+                        value[i] == 6 || value[i] == 7)) ) {
+            HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid guest PAT: %"PRIx64"\n",
+                        guest_pat);
+            return 0;
+        }
+
+    if ( !hvm_funcs.set_guest_pat(v, guest_pat) )
+        v->arch.hvm_vcpu.pat_cr = guest_pat;
+    return 1;
+}
+
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
 {
     uint64_t tsc;
@@ -2394,7 +2419,7 @@ int hvm_msr_read_intercept(unsigned int
         break;

     case MSR_IA32_CR_PAT:
-        *msr_content = v->arch.hvm_vcpu.pat_cr;
+        hvm_get_guest_pat(v, msr_content);
         break;

     case MSR_MTRRcap:
@@ -2510,7 +2535,7 @@ int hvm_msr_write_intercept(unsigned int
         break;

     case MSR_IA32_CR_PAT:
-        if ( !pat_msr_set(&v->arch.hvm_vcpu.pat_cr, msr_content) )
+        if ( !hvm_set_guest_pat(v, msr_content) )
             goto gp_fault;
         break;

diff -r e72c098aa73c -r 201895ed11bc xen/arch/x86/hvm/mtrr.c
--- a/xen/arch/x86/hvm/mtrr.c Tue Sep 04 14:40:53 2012 +0200
+++ b/xen/arch/x86/hvm/mtrr.c Tue Sep 04 14:42:16 2012 +0200
@@ -406,26 +406,6 @@ uint32_t get_pat_flags(struct vcpu *v,
     return pat_type_2_pte_flags(pat_entry_value);
 }

-/* Helper funtions for seting mtrr/pat */
-bool_t pat_msr_set(uint64_t *pat, uint64_t msr_content)
-{
-    uint8_t *value = (uint8_t*)&msr_content;
-    int32_t i;
-
-    if ( *pat != msr_content )
-    {
-        for ( i = 0; i < 8; i++ )
-            if ( unlikely(!(value[i] == 0 || value[i] == 1 ||
-                            value[i] == 4 || value[i] == 5 ||
-                            value[i] == 6 || value[i] == 7)) )
-                return 0;
-
-        *pat = msr_content;
-    }
-
-    return 1;
-}
-
 bool_t mtrr_def_type_msr_set(struct mtrr_state *m, uint64_t msr_content)
 {
     uint8_t def_type = msr_content & 0xff;
@@ -636,7 +616,7 @@ static int hvm_save_mtrr_msr(struct doma
     {
         mtrr_state = &v->arch.hvm_vcpu.mtrr;

-        hw_mtrr.msr_pat_cr = v->arch.hvm_vcpu.pat_cr;
+        hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr);

         hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
                                     | (mtrr_state->enabled << 10);
@@ -681,7 +661,7 @@ static int hvm_load_mtrr_msr(struct doma

     mtrr_state = &v->arch.hvm_vcpu.mtrr;

-    pat_msr_set(&v->arch.hvm_vcpu.pat_cr, hw_mtrr.msr_pat_cr);
+    hvm_set_guest_pat(v, hw_mtrr.msr_pat_cr);

     mtrr_state->mtrr_cap = hw_mtrr.msr_mtrr_cap;

diff -r e72c098aa73c -r 201895ed11bc xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Tue Sep 04 14:40:53 2012 +0200
+++ b/xen/arch/x86/hvm/svm/svm.c Tue Sep 04 14:42:16 2012 +0200
@@ -585,6 +585,28 @@ static void svm_set_segment_register(str
         svm_vmload(vmcb);
 }

+static int svm_set_guest_pat(struct vcpu *v, u64 gpat)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    if ( !paging_mode_hap(v->domain) )
+        return 0;
+
+    vmcb_set_g_pat(vmcb, gpat);
+    return 1;
+}
+
+static int svm_get_guest_pat(struct vcpu *v, u64 *gpat)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    if ( !paging_mode_hap(v->domain) )
+        return 0;
+
+    *gpat = vmcb_get_g_pat(vmcb);
+    return 1;
+}
+
 static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -1519,6 +1541,8 @@ static struct hvm_function_table __read_
     .update_host_cr3      = svm_update_host_cr3,
     .update_guest_cr      = svm_update_guest_cr,
     .update_guest_efer    = svm_update_guest_efer,
+    .set_guest_pat        = svm_set_guest_pat,
+    .get_guest_pat        = svm_get_guest_pat,
     .set_tsc_offset       = svm_set_tsc_offset,
     .inject_exception     = svm_inject_exception,
     .init_hypercall_page  = svm_init_hypercall_page,
diff -r e72c098aa73c -r 201895ed11bc xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Tue Sep 04 14:40:53 2012 +0200
+++ b/xen/arch/x86/hvm/vmx/vmx.c Tue Sep 04 14:42:16 2012 +0200
@@ -921,6 +921,34 @@ static void vmx_set_segment_register(str
     vmx_vmcs_exit(v);
 }

+static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
+{
+    if ( !cpu_has_vmx_pat || !paging_mode_hap(v->domain) )
+        return 0;
+
+    vmx_vmcs_enter(v);
+    __vmwrite(GUEST_PAT, gpat);
+#ifdef __i386__
+    __vmwrite(GUEST_PAT_HIGH, gpat >> 32);
+#endif
+    vmx_vmcs_exit(v);
+    return 1;
+}
+
+static int vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
+{
+    if ( !cpu_has_vmx_pat || !paging_mode_hap(v->domain) )
+        return 0;
+
+    vmx_vmcs_enter(v);
+    *gpat = __vmread(GUEST_PAT);
+#ifdef __i386__
+    *gpat |= (u64)__vmread(GUEST_PAT_HIGH) << 32;
+#endif
+    vmx_vmcs_exit(v);
+    return 1;
+}
+
 static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     vmx_vmcs_enter(v);
@@ -1384,6 +1412,8 @@ static struct hvm_function_table __read_
     .update_host_cr3      = vmx_update_host_cr3,
     .update_guest_cr      = vmx_update_guest_cr,
     .update_guest_efer    = vmx_update_guest_efer,
+    .set_guest_pat        = vmx_set_guest_pat,
+    .get_guest_pat        = vmx_get_guest_pat,
     .set_tsc_offset       = vmx_set_tsc_offset,
     .inject_exception     = vmx_inject_exception,
     .init_hypercall_page  = vmx_init_hypercall_page,
diff -r e72c098aa73c -r 201895ed11bc xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Tue Sep 04 14:40:53 2012 +0200
+++ b/xen/include/asm-x86/hvm/hvm.h Tue Sep 04 14:42:16 2012 +0200
@@ -116,6 +116,9 @@ struct hvm_function_table {
     void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
     void (*update_guest_efer)(struct vcpu *v);

+    int (*get_guest_pat)(struct vcpu *v, u64 *);
+    int (*set_guest_pat)(struct vcpu *v, u64);
+
     void (*set_tsc_offset)(struct vcpu *v, u64 offset);

     void (*inject_exception)(unsigned int trapnr, int errcode,
@@ -166,6 +169,9 @@ void hvm_vcpu_reset_state(struct vcpu *v

 bool_t hvm_send_assist_req(struct vcpu *v);

+void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
+int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
+
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc);
 u64 hvm_get_guest_tsc(struct vcpu *v);
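[Editorial note on the validation in hvm_set_guest_pat() above: the eight
byte-wide entries are checked against the only memory types the x86
architecture permits in the IA32_PAT MSR, namely 0 (UC), 1 (WC), 4 (WT),
5 (WP), 6 (WB) and 7 (UC-); encodings 2 and 3 are reserved, which is why
the loop rejects them and hvm_msr_write_intercept() then raises #GP,
matching bare-metal behaviour. Below is a minimal standalone C sketch of
the same check, for illustration only; the function name
pat_value_is_valid and the test values are hypothetical and not part of
the patch.]

#include <stdint.h>
#include <stdio.h>

/* Return 1 iff every byte of an IA32_PAT value is a valid memory type:
 * UC (0), WC (1), WT (4), WP (5), WB (6) or UC- (7).  Types 2 and 3
 * are architecturally reserved, so any byte holding them is rejected. */
static int pat_value_is_valid(uint64_t pat)
{
    int i;

    for ( i = 0; i < 8; i++ )
    {
        uint8_t type = (pat >> (i * 8)) & 0xff;

        if ( type == 2 || type == 3 || type > 7 )
            return 0;
    }
    return 1;
}

int main(void)
{
    /* 0x0007040600070406 is the architectural reset value of IA32_PAT. */
    printf("%d\n", pat_value_is_valid(0x0007040600070406ULL));  /* prints 1 */
    printf("%d\n", pat_value_is_valid(0x0000000000000302ULL));  /* prints 0 */
    return 0;
}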
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog