[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] nestedsvm: Support TSC Rate MSR



# HG changeset patch
# User Christoph Egger <Christoph.Egger@xxxxxxx>
# Date 1310804602 -3600
# Node ID 330b6b1d5b7a4e375ae3ef150e5b808a1e7c8713
# Parent  9afeab10a65d989b46768bcd57b8dfba85ff5f8d
nestedsvm: Support TSC Rate MSR

Support TSC Rate MSR and enable TSC scaling for
nested virtualization.

With it, guest VMs don't need to take a #VMEXIT to calculate a translated
TSC value when running under TSC emulation mode.

I measured native performance of the rdtsc instruction
in the l2 guest with xen-on-xen, with both the host and
the l1 guest running under TSC emulation mode.

TSC scaling just needs MSR emulation and correct tsc offset
calculation to be done and thus can be emulated also on older
hardware. In this case the rdtsc instruction is intercepted and
handled by the host directly, which saves the cost of a full
VMRUN/VMEXIT emulation cycle.

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
---


diff -r 9afeab10a65d -r 330b6b1d5b7a tools/libxc/xc_cpuid_x86.c
--- a/tools/libxc/xc_cpuid_x86.c        Sat Jul 16 09:18:45 2011 +0100
+++ b/tools/libxc/xc_cpuid_x86.c        Sat Jul 16 09:23:22 2011 +0100
@@ -157,7 +157,7 @@
             SVM_FEATURE_DECODEASSISTS);
 
         /* Pass 2: Always enable SVM features which are emulated */
-        regs[3] |= SVM_FEATURE_VMCBCLEAN;
+        regs[3] |= SVM_FEATURE_VMCBCLEAN | SVM_FEATURE_TSCRATEMSR;
         break;
     }
 
diff -r 9afeab10a65d -r 330b6b1d5b7a xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c  Sat Jul 16 09:18:45 2011 +0100
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c  Sat Jul 16 09:23:22 2011 +0100
@@ -147,6 +147,8 @@
     svm->ns_msr_hsavepa = VMCX_EADDR;
     svm->ns_ovvmcb_pa = VMCX_EADDR;
 
+    svm->ns_tscratio = DEFAULT_TSC_RATIO;
+
     svm->ns_cr_intercepts = 0;
     svm->ns_dr_intercepts = 0;
     svm->ns_exception_intercepts = 0;
@@ -1185,6 +1187,9 @@
     case MSR_K8_VM_HSAVE_PA:
         *msr_content = svm->ns_msr_hsavepa;
         break;
+    case MSR_AMD64_TSC_RATIO:
+        *msr_content = svm->ns_tscratio;
+        break;
     default:
         ret = 0;
         break;
@@ -1211,6 +1216,16 @@
         }
         svm->ns_msr_hsavepa = msr_content;
         break;
+    case MSR_AMD64_TSC_RATIO:
+        if ((msr_content & ~TSC_RATIO_RSVD_BITS) != msr_content) {
+            gdprintk(XENLOG_ERR,
+                "reserved bits set in MSR_AMD64_TSC_RATIO 0x%"PRIx64"\n",
+                msr_content);
+            ret = -1; /* inject #GP */
+            break;
+        }
+        svm->ns_tscratio = msr_content;
+        break;
     default:
         ret = 0;
         break;
diff -r 9afeab10a65d -r 330b6b1d5b7a xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Sat Jul 16 09:18:45 2011 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Sat Jul 16 09:23:22 2011 +0100
@@ -635,28 +635,37 @@
         svm_vmload(vmcb);
 }
 
+static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc,
+    uint64_t ratio)
+{
+    uint64_t offset;
+
+    if (ratio == DEFAULT_TSC_RATIO)
+        return guest_tsc - host_tsc;
+
+    /* calculate hi,lo parts in 64bits to prevent overflow */
+    offset = (((host_tsc >> 32U) * (ratio >> 32U)) << 32U) +
+          (host_tsc & 0xffffffffULL) * (ratio & 0xffffffffULL);
+    return guest_tsc - offset;
+}
+
 static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     struct vmcb_struct *n1vmcb, *n2vmcb;
     uint64_t n2_tsc_offset = 0;
     struct domain *d = v->domain;
+    uint64_t host_tsc, guest_tsc;
+
+    guest_tsc = hvm_get_guest_tsc(v);
+
+    /* Re-adjust the offset value when TSC_RATIO is available */
+    if ( cpu_has_tsc_ratio && d->arch.vtsc ) {
+        rdtscll(host_tsc);
+        offset = svm_get_tsc_offset(host_tsc, guest_tsc, vcpu_tsc_ratio(v));
+    }
 
     if ( !nestedhvm_enabled(d) ) {
-        /* Re-adjust the offset value when TSC_RATIO is available */
-        if ( cpu_has_tsc_ratio && d->arch.vtsc )
-        {
-            uint64_t host_tsc, guest_tsc;
-
-            rdtscll(host_tsc);
-            guest_tsc = hvm_get_guest_tsc(v);
-            
-            /* calculate hi,lo parts in 64bits to prevent overflow */
-            offset = (((host_tsc >> 32) * d->arch.tsc_khz / cpu_khz) << 32) +
-                     (host_tsc & 0xffffffffULL) * d->arch.tsc_khz / cpu_khz;
-            offset = guest_tsc - offset;
-        }
-
         vmcb_set_tsc_offset(vmcb, offset);
         return;
     }
@@ -665,8 +674,14 @@
     n2vmcb = vcpu_nestedhvm(v).nv_n2vmcx;
 
     if ( nestedhvm_vcpu_in_guestmode(v) ) {
+        struct nestedsvm *svm = &vcpu_nestedsvm(v);
+
         n2_tsc_offset = vmcb_get_tsc_offset(n2vmcb) -
             vmcb_get_tsc_offset(n1vmcb);
+        if ( svm->ns_tscratio != DEFAULT_TSC_RATIO ) {
+            n2_tsc_offset = svm_get_tsc_offset(guest_tsc,
+                guest_tsc + n2_tsc_offset, svm->ns_tscratio);
+        }
         vmcb_set_tsc_offset(n1vmcb, offset);
     }
 
@@ -1107,6 +1122,7 @@
     P(cpu_has_svm_cleanbits, "VMCB Clean Bits");
     P(cpu_has_svm_decode, "DecodeAssists");
     P(cpu_has_pause_filter, "Pause-Intercept Filter");
+    P(cpu_has_tsc_ratio, "TSC Rate MSR");
 #undef P
 
     if ( !printed )
diff -r 9afeab10a65d -r 330b6b1d5b7a xen/include/asm-x86/hvm/svm/nestedsvm.h
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h   Sat Jul 16 09:18:45 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h   Sat Jul 16 09:23:22 2011 +0100
@@ -36,6 +36,11 @@
      */
     uint64_t ns_ovvmcb_pa;
 
+    /* virtual tscratio holding the value l1 guest writes to the
+     * MSR_AMD64_TSC_RATIO MSR.
+     */
+    uint64_t ns_tscratio;
+
     /* Cached real intercepts of the l2 guest */
     uint32_t ns_cr_intercepts;
     uint32_t ns_dr_intercepts;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.