[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-4.1-testing] HVM/SVM: enable tsc scaling ratio for SVM



# HG changeset patch
# User Wei Huang <wei.huang2@xxxxxxx>
# Date 1334218394 -3600
# Node ID 4ad262a48a715a022582567fa7a996389cb37e6c
# Parent  7d9df818d302a593ab2e1b3e795ee8f369af96ae
HVM/SVM: enable tsc scaling ratio for SVM

Future AMD CPUs support TSC scaling. It allows guests to have a
different TSC frequency from the host system using this formula:
guest_tsc = host_tsc * tsc_ratio + vmcb_offset. The tsc_ratio is a
64-bit MSR that contains a fixed-point number in 8.32 format (8 bits
for the integer part and 32 bits for the fractional part). For
instance, 0x00000003_80000000 means tsc_ratio=3.5.

This patch enables the TSC scaling ratio for SVM. With it, guest VMs
don't need to take a #VMEXIT to calculate a translated TSC value when
running under TSC emulation mode. This can substantially reduce the
rdtsc overhead.

Signed-off-by: Wei Huang <wei.huang2@xxxxxxx>
xen-unstable changeset:   23437:d7c755c25bb9
xen-unstable date:        Sat May 28 08:58:08 2011 +0100
---


diff -r 7d9df818d302 -r 4ad262a48a71 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Apr 12 09:08:13 2012 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Apr 12 09:13:14 2012 +0100
@@ -588,6 +588,22 @@ static void svm_set_segment_register(str
 static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct domain *d = v->domain;
+
+    /* Re-adjust the offset value when TSC_RATIO is available */
+    if ( cpu_has_tsc_ratio && d->arch.vtsc )
+    {
+        uint64_t host_tsc, guest_tsc;
+
+        rdtscll(host_tsc);
+        guest_tsc = hvm_get_guest_tsc(v);
+            
+        /* calculate hi,lo parts in 64bits to prevent overflow */
+        offset = (((host_tsc >> 32) * d->arch.tsc_khz / cpu_khz) << 32) +
+            (host_tsc & 0xffffffffULL) * d->arch.tsc_khz / cpu_khz;
+        offset = guest_tsc - offset;
+    }
+
     vmcb_set_tsc_offset(vmcb, offset);
 }
 
@@ -638,6 +654,19 @@ static void svm_init_hypercall_page(stru
     *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
 }
 
+static inline void svm_tsc_ratio_save(struct vcpu *v)
+{
+    /* Other vcpus might not have vtsc enabled. So disable TSC_RATIO here. */
+    if ( cpu_has_tsc_ratio && v->domain->arch.vtsc )
+        wrmsrl(MSR_AMD64_TSC_RATIO, DEFAULT_TSC_RATIO);
+}
+
+static inline void svm_tsc_ratio_load(struct vcpu *v)
+{
+    if ( cpu_has_tsc_ratio && v->domain->arch.vtsc ) 
+        wrmsrl(MSR_AMD64_TSC_RATIO, vcpu_tsc_ratio(v));
+}
+
 static void svm_ctxt_switch_from(struct vcpu *v)
 {
     int cpu = smp_processor_id();
@@ -646,6 +675,7 @@ static void svm_ctxt_switch_from(struct 
 
     svm_save_dr(v);
     vpmu_save(v);
+    svm_tsc_ratio_save(v);
 
     svm_sync_vmcb(v);
     svm_vmload(per_cpu(root_vmcb, cpu));
@@ -689,6 +719,7 @@ static void svm_ctxt_switch_to(struct vc
     svm_vmload(vmcb);
     vmcb->cleanbits.bytes = 0;
     vpmu_load(v);
+    svm_tsc_ratio_load(v);
 
     if ( cpu_has_rdtscp )
         wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
diff -r 7d9df818d302 -r 4ad262a48a71 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Thu Apr 12 09:08:13 2012 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Thu Apr 12 09:13:14 2012 +0100
@@ -165,7 +165,9 @@ static int construct_vmcb(struct vcpu *v
 
     /* TSC. */
     vmcb->_tsc_offset = 0;
-    if ( v->domain->arch.vtsc )
+
+    /* Don't need to intercept RDTSC if CPU supports TSC rate scaling */
+    if ( v->domain->arch.vtsc && !cpu_has_tsc_ratio )
     {
         vmcb->_general1_intercepts |= GENERAL1_INTERCEPT_RDTSC;
         vmcb->_general2_intercepts |= GENERAL2_INTERCEPT_RDTSCP;
diff -r 7d9df818d302 -r 4ad262a48a71 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Thu Apr 12 09:08:13 2012 +0100
+++ b/xen/include/asm-x86/hvm/svm/svm.h Thu Apr 12 09:13:14 2012 +0100
@@ -82,5 +82,13 @@ extern u32 svm_feature_flags;
 #define cpu_has_svm_cleanbits cpu_has_svm_feature(SVM_FEATURE_VMCBCLEAN)
 #define cpu_has_svm_decode    cpu_has_svm_feature(SVM_FEATURE_DECODEASSISTS)
 #define cpu_has_pause_filter  cpu_has_svm_feature(SVM_FEATURE_PAUSEFILTER)
+#define cpu_has_tsc_ratio     cpu_has_svm_feature(SVM_FEATURE_TSCRATEMSR)
+
+/* TSC rate */
+#define DEFAULT_TSC_RATIO       0x0000000100000000ULL
+#define TSC_RATIO_RSVD_BITS     0xffffff0000000000ULL
+#define TSC_RATIO(g_khz, h_khz) ( (((u64)(g_khz)<<32)/(u64)(h_khz)) & \
+                                  ~TSC_RATIO_RSVD_BITS )
+#define vcpu_tsc_ratio(v)       TSC_RATIO((v)->domain->arch.tsc_khz, cpu_khz)
 
 #endif /* __ASM_X86_HVM_SVM_H__ */
diff -r 7d9df818d302 -r 4ad262a48a71 xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h   Thu Apr 12 09:08:13 2012 +0100
+++ b/xen/include/asm-x86/msr-index.h   Thu Apr 12 09:13:14 2012 +0100
@@ -265,6 +265,9 @@
 #define MSR_AMD_PATCHLEVEL             0x0000008b
 #define MSR_AMD_PATCHLOADER            0xc0010020
 
+/* AMD TSC RATE MSR */
+#define MSR_AMD64_TSC_RATIO            0xc0000104
+
 /* AMD OS Visible Workaround MSRs */
 #define MSR_AMD_OSVW_ID_LENGTH          0xc0010140
 #define MSR_AMD_OSVW_STATUS             0xc0010141

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.