[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] Scaling guest's TSC when the target machine's frequency is different from its requirement



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1245837922 -3600
# Node ID 81edfffb3aff92e03acd5ed68477763eb438c9ae
# Parent  50634c215234e15dc8f4b324d9ee3b5fad779c34
Scale the guest's TSC when the target machine's frequency is different
from the frequency the guest requires.

For now, each of the guest's rdtsc instructions is handled by
trap-and-emulate; this may be optimized later.

Signed-off-by: Xiantao Zhang <xiantao.zhang@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c           |   65 +++++++++++++++++++++++++++++++--------
 xen/arch/x86/hvm/save.c          |    9 +++++
 xen/arch/x86/hvm/vmx/vmx.c       |   11 ++++++
 xen/arch/x86/hvm/vpt.c           |    3 +
 xen/include/asm-x86/hvm/domain.h |    2 -
 xen/include/asm-x86/hvm/hvm.h    |    4 ++
 6 files changed, 79 insertions(+), 15 deletions(-)

diff -r 50634c215234 -r 81edfffb3aff xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Jun 24 10:57:00 2009 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed Jun 24 11:05:22 2009 +0100
@@ -144,26 +144,67 @@ uint8_t hvm_combine_hw_exceptions(uint8_
     return TRAP_double_fault;
 }
 
+void hvm_enable_rdtsc_exiting(struct domain *d)
+{
+    struct vcpu *v;
+
+    if ( opt_softtsc || !hvm_funcs.enable_rdtsc_exiting )
+        return;
+
+    for_each_vcpu ( d, v )
+        hvm_funcs.enable_rdtsc_exiting(v);
+}
+
+int hvm_gtsc_need_scale(struct domain *d)
+{
+    uint32_t gtsc_mhz, htsc_mhz;
+
+    gtsc_mhz = d->arch.hvm_domain.gtsc_khz / 1000;
+    htsc_mhz = opt_softtsc ? 1000 : ((uint32_t)cpu_khz / 1000);
+
+    d->arch.hvm_domain.tsc_scaled = (gtsc_mhz && (gtsc_mhz != htsc_mhz));
+    return d->arch.hvm_domain.tsc_scaled;
+}
+
+static u64 hvm_h2g_scale_tsc(struct vcpu *v, u64 host_tsc)
+{
+    uint32_t gtsc_khz, htsc_khz;
+
+    if ( !v->domain->arch.hvm_domain.tsc_scaled )
+        return host_tsc;
+
+    htsc_khz = opt_softtsc ? 1000000 : cpu_khz;
+    gtsc_khz = v->domain->arch.hvm_domain.gtsc_khz;
+    return muldiv64(host_tsc, gtsc_khz, htsc_khz);
+}
+
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
 {
-    u64 host_tsc;
-
-    rdtscll(host_tsc);
-
-    v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - host_tsc;
-    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
-}
-
-u64 hvm_get_guest_tsc(struct vcpu *v)
-{
-    u64 host_tsc;
+    uint64_t host_tsc, scaled_htsc;
 
     if ( opt_softtsc )
         host_tsc = hvm_get_guest_time(v);
     else
         rdtscll(host_tsc);
 
-    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
+    scaled_htsc = hvm_h2g_scale_tsc(v, host_tsc);
+
+    v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - scaled_htsc;
+    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+}
+
+u64 hvm_get_guest_tsc(struct vcpu *v)
+{
+    uint64_t host_tsc, scaled_htsc;
+
+    if ( opt_softtsc )
+        host_tsc = hvm_get_guest_time(v);
+    else
+        rdtscll(host_tsc);
+
+    scaled_htsc = hvm_h2g_scale_tsc(v, host_tsc);
+
+    return scaled_htsc + v->arch.hvm_vcpu.cache_tsc_offset;
 }
 
 void hvm_migrate_timers(struct vcpu *v)
diff -r 50634c215234 -r 81edfffb3aff xen/arch/x86/hvm/save.c
--- a/xen/arch/x86/hvm/save.c   Wed Jun 24 10:57:00 2009 +0100
+++ b/xen/arch/x86/hvm/save.c   Wed Jun 24 11:05:22 2009 +0100
@@ -63,6 +63,15 @@ int arch_hvm_load(struct domain *d, stru
     /* Restore guest's preferred TSC frequency. */
     d->arch.hvm_domain.gtsc_khz = hdr->gtsc_khz;
 
+    if ( hdr->gtsc_khz && hvm_gtsc_need_scale(d) )
+    {
+        hvm_enable_rdtsc_exiting(d);
+        gdprintk(XENLOG_WARNING, "Loading VM(id:%d) expects freq: %dmHz, "
+                "but host's freq :%"PRIu64"mHz, trap and emulate rdtsc!!!\n",
+                d->domain_id, hdr->gtsc_khz / 1000, opt_softtsc ? 1000 :
+                cpu_khz / 1000);
+    }
+
     /* VGA state is not saved/restored, so we nobble the cache. */
     d->arch.hvm_domain.stdvga.cache = 0;
 
diff -r 50634c215234 -r 81edfffb3aff xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 24 10:57:00 2009 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 24 11:05:22 2009 +0100
@@ -947,6 +947,14 @@ static void vmx_set_tsc_offset(struct vc
     vmx_vmcs_exit(v);
 }
 
+static void vmx_enable_rdtsc_exiting(struct vcpu *v)
+{
+    vmx_vmcs_enter(v);
+    v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
+    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+    vmx_vmcs_exit(v);
+ }
+
 void do_nmi(struct cpu_user_regs *);
 
 static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page)
@@ -1395,7 +1403,8 @@ static struct hvm_function_table vmx_fun
     .msr_write_intercept  = vmx_msr_write_intercept,
     .invlpg_intercept     = vmx_invlpg_intercept,
     .set_uc_mode          = vmx_set_uc_mode,
-    .set_info_guest       = vmx_set_info_guest
+    .set_info_guest       = vmx_set_info_guest,
+    .enable_rdtsc_exiting = vmx_enable_rdtsc_exiting
 };
 
 static unsigned long *vpid_bitmap;
diff -r 50634c215234 -r 81edfffb3aff xen/arch/x86/hvm/vpt.c
--- a/xen/arch/x86/hvm/vpt.c    Wed Jun 24 10:57:00 2009 +0100
+++ b/xen/arch/x86/hvm/vpt.c    Wed Jun 24 11:05:22 2009 +0100
@@ -33,7 +33,8 @@ void hvm_init_guest_time(struct domain *
     pl->stime_offset = -(u64)get_s_time();
     pl->last_guest_time = 0;
 
-    d->arch.hvm_domain.gtsc_khz = cpu_khz;
+    d->arch.hvm_domain.gtsc_khz = opt_softtsc ? 1000000 : cpu_khz;
+    d->arch.hvm_domain.tsc_scaled = 0;
 }
 
 u64 hvm_get_guest_time(struct vcpu *v)
diff -r 50634c215234 -r 81edfffb3aff xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h  Wed Jun 24 10:57:00 2009 +0100
+++ b/xen/include/asm-x86/hvm/domain.h  Wed Jun 24 11:05:22 2009 +0100
@@ -45,7 +45,7 @@ struct hvm_domain {
     struct hvm_ioreq_page  buf_ioreq;
 
     uint32_t               gtsc_khz; /* kHz */
-    uint32_t               pad0;
+    bool_t                 tsc_scaled;
     struct pl_time         pl_time;
 
     struct hvm_io_handler  io_handler;
diff -r 50634c215234 -r 81edfffb3aff xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed Jun 24 10:57:00 2009 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Wed Jun 24 11:05:22 2009 +0100
@@ -129,6 +129,7 @@ struct hvm_function_table {
     void (*invlpg_intercept)(unsigned long vaddr);
     void (*set_uc_mode)(struct vcpu *v);
     void (*set_info_guest)(struct vcpu *v);
+    void (*enable_rdtsc_exiting)(struct vcpu *v);
 };
 
 extern struct hvm_function_table hvm_funcs;
@@ -282,6 +283,9 @@ int hvm_event_needs_reinjection(uint8_t 
 
 uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);
 
+void hvm_enable_rdtsc_exiting(struct domain *d);
+int hvm_gtsc_need_scale(struct domain *d);
+
 static inline int hvm_cpu_up(void)
 {
     if ( hvm_funcs.cpu_up )

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.