
[Xen-changelog] [xen-unstable] x86: Save/restore TSC adjust during HVM guest migration


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Thu, 27 Sep 2012 22:33:21 +0000
  • Delivery-date: Thu, 27 Sep 2012 22:33:25 +0000
  • List-id: Change log for Mercurial (receive only) <xen-changelog.lists.xen.org>

# HG changeset patch
# User Liu, Jinsong <jinsong.liu@xxxxxxxxx>
# Date 1348654418 -7200
# Node ID 56fb977ce6eb4626a02d4a7a34e85009bb8ee3e0
# Parent  c47ef9592fb39325e33f8406b4bd736cc84482e5
x86: Save/restore TSC adjust during HVM guest migration

Signed-off-by: Liu, Jinsong <jinsong.liu@xxxxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
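
IA32_TSC_ADJUST (MSR 0x3b) lets a guest rebase its view of the
time-stamp counter: writing a delta to the MSR offsets every
subsequent RDTSC by that delta. Xen tracks the guest's value per vCPU
in v->arch.hvm_vcpu.msr_tsc_adjust, but until now that field was not
part of the HVM save record set, so the value was silently lost
across save/restore and live migration; on the destination host the
guest's RDTSC would then jump by exactly the lost delta. A minimal
model of the architectural behaviour (plain C for illustration, not
Xen code):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t internal_tsc;  /* free-running counter (modelled)  */
    static uint64_t tsc_adjust;    /* IA32_TSC_ADJUST value (modelled) */

    /* Architecturally, RDTSC returns the internal counter plus the
     * current TSC_ADJUST value. */
    static uint64_t model_rdtsc(void)
    {
        return internal_tsc + tsc_adjust;
    }

    int main(void)
    {
        internal_tsc = 1000;
        printf("before: %llu\n", (unsigned long long)model_rdtsc());
        tsc_adjust = 500;          /* guest writes a delta to the MSR */
        printf("after:  %llu\n", (unsigned long long)model_rdtsc());
        return 0;
    }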


diff -r c47ef9592fb3 -r 56fb977ce6eb tools/misc/xen-hvmctx.c
--- a/tools/misc/xen-hvmctx.c   Wed Sep 26 12:12:42 2012 +0200
+++ b/tools/misc/xen-hvmctx.c   Wed Sep 26 12:13:38 2012 +0200
@@ -392,6 +392,13 @@ static void dump_vmce_vcpu(void)
     printf("    VMCE_VCPU: bank1 mci_ctl2 %" PRIx64 "\n", p.mci_ctl2_bank1);
 }
 
+static void dump_tsc_adjust(void)
+{
+    HVM_SAVE_TYPE(TSC_ADJUST) p;
+    READ(p);
+    printf("    TSC_ADJUST: tsc_adjust %" PRIx64 "\n", p.tsc_adjust);
+}
+
 int main(int argc, char **argv)
 {
     int entry, domid;
@@ -459,6 +466,7 @@ int main(int argc, char **argv)
         case HVM_SAVE_CODE(VIRIDIAN_DOMAIN): dump_viridian_domain(); break;
         case HVM_SAVE_CODE(VIRIDIAN_VCPU): dump_viridian_vcpu(); break;
         case HVM_SAVE_CODE(VMCE_VCPU): dump_vmce_vcpu(); break;
+        case HVM_SAVE_CODE(TSC_ADJUST): dump_tsc_adjust(); break;
         case HVM_SAVE_CODE(END): break;
         default:
             printf(" ** Don't understand type %u: skipping\n",
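
With the hunk above, xen-hvmctx recognises the new record and prints
one "TSC_ADJUST: tsc_adjust <hex>" line per vCPU instance it finds in
the saved context. The sketch below shows the kind of record walk the
tool performs, assuming the descriptor layout declared in
xen/include/public/hvm/save.h (a 16-bit typecode, a 16-bit instance
and a 32-bit body length before every record body) and x86's
little-endian byte order; it is an illustration, not the tool itself:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct hvm_save_descriptor {
        uint16_t typecode;   /* 19 == TSC_ADJUST after this patch */
        uint16_t instance;   /* vcpu id for per-vcpu records      */
        uint32_t length;     /* length of the record body         */
    };

    /* Walk a saved-context buffer, printing any TSC_ADJUST records. */
    static void dump_tsc_adjust_records(const uint8_t *buf, size_t len)
    {
        size_t off = 0;
        struct hvm_save_descriptor d;

        while ( off + sizeof(d) <= len )
        {
            memcpy(&d, buf + off, sizeof(d));
            off += sizeof(d);
            if ( d.typecode == 19 && d.length == sizeof(uint64_t) )
            {
                uint64_t tsc_adjust;
                memcpy(&tsc_adjust, buf + off, sizeof(tsc_adjust));
                printf("    TSC_ADJUST: tsc_adjust %" PRIx64 "\n",
                       tsc_adjust);
            }
            off += d.length;
        }
    }

    int main(void)
    {
        /* One record for vcpu0 with tsc_adjust == 0x1f4: an 8-byte
         * descriptor followed by an 8-byte body, little endian. */
        uint8_t buf[16] = { 19, 0, 0, 0, 8, 0, 0, 0,
                            0xf4, 0x01, 0, 0, 0, 0, 0, 0 };
        dump_tsc_adjust_records(buf, sizeof(buf));
        return 0;
    }
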
diff -r c47ef9592fb3 -r 56fb977ce6eb xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Sep 26 12:12:42 2012 +0200
+++ b/xen/arch/x86/hvm/hvm.c    Wed Sep 26 12:13:38 2012 +0200
@@ -603,6 +603,46 @@ void hvm_domain_destroy(struct domain *d
     hvm_destroy_cacheattr_region_list(d);
 }
 
+static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
+{
+    struct vcpu *v;
+    struct hvm_tsc_adjust ctxt;
+    int err = 0;
+
+    for_each_vcpu ( d, v )
+    {
+        ctxt.tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
+        err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
+        if ( err )
+            break;
+    }
+
+    return err;
+}
+
+static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
+{
+    unsigned int vcpuid = hvm_load_instance(h);
+    struct vcpu *v;
+    struct hvm_tsc_adjust ctxt;
+
+    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+    {
+        dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
+                d->domain_id, vcpuid);
+        return -EINVAL;
+    }
+
+    if ( hvm_load_entry(TSC_ADJUST, h, &ctxt) != 0 )
+        return -EINVAL;
+
+    v->arch.hvm_vcpu.msr_tsc_adjust = ctxt.tsc_adjust;
+    return 0;
+}
+
+HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
+                          hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
+
 static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
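
The HVM_REGISTER_SAVE_RESTORE() line ties save-type TSC_ADJUST to the
two handlers; the trailing "1, HVMSR_PER_VCPU" tells the save/restore
framework to expect one record instance per vCPU. That is why the
saver loops over every vCPU while the loader resolves a single vCPU
from the record's instance id (hvm_load_instance()) and rejects ids
the domain does not have. A self-contained model of that symmetry,
with illustrative names rather than Xen APIs:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_VCPUS 2

    /* One record per vCPU, keyed by instance id (the vcpu_id). */
    struct record { uint16_t instance; uint64_t tsc_adjust; };

    static uint64_t msr_tsc_adjust[NR_VCPUS];   /* per-vCPU state */

    static void save_all(struct record rec[NR_VCPUS])
    {
        for ( int i = 0; i < NR_VCPUS; i++ )
        {
            rec[i].instance = i;
            rec[i].tsc_adjust = msr_tsc_adjust[i];
        }
    }

    static int load_one(const struct record *r)
    {
        if ( r->instance >= NR_VCPUS )     /* no such vcpu: reject */
            return -1;
        msr_tsc_adjust[r->instance] = r->tsc_adjust;
        return 0;
    }

    int main(void)
    {
        struct record rec[NR_VCPUS];

        msr_tsc_adjust[0] = 0x10;                /* "source host"  */
        msr_tsc_adjust[1] = 0x20;
        save_all(rec);

        msr_tsc_adjust[0] = msr_tsc_adjust[1] = 0;
        for ( int i = 0; i < NR_VCPUS; i++ )     /* "destination"  */
            if ( load_one(&rec[i]) )
                return 1;

        printf("vcpu0 %llx, vcpu1 %llx\n",
               (unsigned long long)msr_tsc_adjust[0],
               (unsigned long long)msr_tsc_adjust[1]);
        return 0;
    }
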
diff -r c47ef9592fb3 -r 56fb977ce6eb xen/include/public/arch-x86/hvm/save.h
--- a/xen/include/public/arch-x86/hvm/save.h    Wed Sep 26 12:12:42 2012 +0200
+++ b/xen/include/public/arch-x86/hvm/save.h    Wed Sep 26 12:13:38 2012 +0200
@@ -583,9 +583,15 @@ struct hvm_vmce_vcpu {
 
 DECLARE_HVM_SAVE_TYPE(VMCE_VCPU, 18, struct hvm_vmce_vcpu);
 
+struct hvm_tsc_adjust {
+    uint64_t tsc_adjust;
+};
+
+DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust);
+
 /* 
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 18
+#define HVM_SAVE_CODE_MAX 19
 
 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
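
The record body added to the public save format is a single uint64_t,
and 19 becomes its permanent type code in the migration ABI; bumping
HVM_SAVE_CODE_MAX in the same change keeps the bound on known type
codes accurate. One consequence worth noting: a hypervisor without
this patch has no handler registered for code 19, so restoring an
image that contains such a record on it will fail. A trivial
compile-and-run check of the ABI-relevant size (illustration only):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the public record declared above. */
    struct hvm_tsc_adjust { uint64_t tsc_adjust; };

    int main(void)
    {
        /* The save format is a stable ABI: the body must stay 8 bytes. */
        assert(sizeof(struct hvm_tsc_adjust) == 8);
        printf("TSC_ADJUST body: %zu bytes, type code 19\n",
               sizeof(struct hvm_tsc_adjust));
        return 0;
    }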
