[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] HVM/SVM: enable tsc scaling ratio for SVM
# HG changeset patch # User Wei Huang <wei.huang2@xxxxxxx> # Date 1306569488 -3600 # Node ID d7c755c25bb9d6ed77d64cb6736b6c4f339db1bf # Parent f6ce871e568949f5817470f6c7bab6ed1f8f6c13 HVM/SVM: enable tsc scaling ratio for SVM Future AMD CPUs support TSC scaling. It allows guests to have a different TSC frequency from host system using this formula: guest_tsc = host_tsc * tsc_ratio + vmcb_offset. The tsc_ratio is a 64bit MSR containing a fixed-point number in 8.32 format (8 bits for integer part and 32bits for fractional part). For instance 0x00000003_80000000 means tsc_ratio=3.5. This patch enables TSC scaling ratio for SVM. With it, guest VMs don't need to take #VMEXIT to calculate a translated TSC value when it is running under TSC emulation mode. This can substantially reduce the rdtsc overhead. Signed-off-by: Wei Huang <wei.huang2@xxxxxxx> --- diff -r f6ce871e5689 -r d7c755c25bb9 xen/arch/x86/hvm/svm/svm.c --- a/xen/arch/x86/hvm/svm/svm.c Sat May 28 08:57:12 2011 +0100 +++ b/xen/arch/x86/hvm/svm/svm.c Sat May 28 08:58:08 2011 +0100 @@ -640,8 +640,23 @@ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; struct vmcb_struct *n1vmcb, *n2vmcb; uint64_t n2_tsc_offset = 0; + struct domain *d = v->domain; - if ( !nestedhvm_enabled(v->domain) ) { + if ( !nestedhvm_enabled(d) ) { + /* Re-adjust the offset value when TSC_RATIO is available */ + if ( cpu_has_tsc_ratio && d->arch.vtsc ) + { + uint64_t host_tsc, guest_tsc; + + rdtscll(host_tsc); + guest_tsc = hvm_get_guest_tsc(v); + + /* calculate hi,lo parts in 64bits to prevent overflow */ + offset = (((host_tsc >> 32) * d->arch.tsc_khz / cpu_khz) << 32) + + (host_tsc & 0xffffffffULL) * d->arch.tsc_khz / cpu_khz; + offset = guest_tsc - offset; + } + vmcb_set_tsc_offset(vmcb, offset); return; } @@ -749,6 +764,19 @@ return 0; } +static inline void svm_tsc_ratio_save(struct vcpu *v) +{ + /* Other vcpus might not have vtsc enabled. So disable TSC_RATIO here. 
*/ + if ( cpu_has_tsc_ratio && v->domain->arch.vtsc ) + wrmsrl(MSR_AMD64_TSC_RATIO, DEFAULT_TSC_RATIO); +} + +static inline void svm_tsc_ratio_load(struct vcpu *v) +{ + if ( cpu_has_tsc_ratio && v->domain->arch.vtsc ) + wrmsrl(MSR_AMD64_TSC_RATIO, vcpu_tsc_ratio(v)); +} + static void svm_ctxt_switch_from(struct vcpu *v) { int cpu = smp_processor_id(); @@ -758,6 +786,7 @@ svm_save_dr(v); vpmu_save(v); svm_lwp_save(v); + svm_tsc_ratio_save(v); svm_sync_vmcb(v); svm_vmload(per_cpu(root_vmcb, cpu)); @@ -802,6 +831,7 @@ vmcb->cleanbits.bytes = 0; vpmu_load(v); svm_lwp_load(v); + svm_tsc_ratio_load(v); if ( cpu_has_rdtscp ) wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v)); diff -r f6ce871e5689 -r d7c755c25bb9 xen/arch/x86/hvm/svm/vmcb.c --- a/xen/arch/x86/hvm/svm/vmcb.c Sat May 28 08:57:12 2011 +0100 +++ b/xen/arch/x86/hvm/svm/vmcb.c Sat May 28 08:58:08 2011 +0100 @@ -128,7 +128,9 @@ /* TSC. */ vmcb->_tsc_offset = 0; - if ( v->domain->arch.vtsc ) + + /* Don't need to intercept RDTSC if CPU supports TSC rate scaling */ + if ( v->domain->arch.vtsc && !cpu_has_tsc_ratio ) { vmcb->_general1_intercepts |= GENERAL1_INTERCEPT_RDTSC; vmcb->_general2_intercepts |= GENERAL2_INTERCEPT_RDTSCP; diff -r f6ce871e5689 -r d7c755c25bb9 xen/include/asm-x86/hvm/svm/svm.h --- a/xen/include/asm-x86/hvm/svm/svm.h Sat May 28 08:57:12 2011 +0100 +++ b/xen/include/asm-x86/hvm/svm/svm.h Sat May 28 08:58:08 2011 +0100 @@ -87,7 +87,15 @@ #define cpu_has_svm_cleanbits cpu_has_svm_feature(SVM_FEATURE_VMCBCLEAN) #define cpu_has_svm_decode cpu_has_svm_feature(SVM_FEATURE_DECODEASSISTS) #define cpu_has_pause_filter cpu_has_svm_feature(SVM_FEATURE_PAUSEFILTER) +#define cpu_has_tsc_ratio cpu_has_svm_feature(SVM_FEATURE_TSCRATEMSR) #define SVM_PAUSEFILTER_INIT 3000 +/* TSC rate */ +#define DEFAULT_TSC_RATIO 0x0000000100000000ULL +#define TSC_RATIO_RSVD_BITS 0xffffff0000000000ULL +#define TSC_RATIO(g_khz, h_khz) ( (((u64)(g_khz)<<32)/(u64)(h_khz)) & \ + ~TSC_RATIO_RSVD_BITS ) +#define vcpu_tsc_ratio(v) 
TSC_RATIO((v)->domain->arch.tsc_khz, cpu_khz) + #endif /* __ASM_X86_HVM_SVM_H__ */ diff -r f6ce871e5689 -r d7c755c25bb9 xen/include/asm-x86/msr-index.h --- a/xen/include/asm-x86/msr-index.h Sat May 28 08:57:12 2011 +0100 +++ b/xen/include/asm-x86/msr-index.h Sat May 28 08:58:08 2011 +0100 @@ -266,6 +266,9 @@ #define MSR_AMD_PATCHLEVEL 0x0000008b #define MSR_AMD_PATCHLOADER 0xc0010020 +/* AMD TSC RATE MSR */ +#define MSR_AMD64_TSC_RATIO 0xc0000104 + /* AMD Lightweight Profiling MSRs */ #define MSR_AMD64_LWP_CFG 0xc0000105 #define MSR_AMD64_LWP_CBADDR 0xc0000106 _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |