
[Xen-changelog] [xen-unstable] x86: vMCE save and restore


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Thu, 27 Sep 2012 22:33:19 +0000
  • Delivery-date: Thu, 27 Sep 2012 22:33:24 +0000
  • List-id: "Change log for Mercurial (receive only)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Liu, Jinsong <jinsong.liu@xxxxxxxxx>
# Date 1348653955 -7200
# Node ID 07d0d5b3a0054d2534f09bcf90437678df2bfc54
# Parent  8f8fabafec86bd0e5ba65a0c2ea28027eb1278eb
x86: vMCE save and restore

This patch provides vMCE save/restore during migration.
1. MCG_CAP is well-defined. However, considering future capability
   extensions, we keep the save/restore logic that Jan implemented
   in c/s 24887;
2. MCi_CTL2 is initialized by the guest OS at boot, so it needs
   save/restore, otherwise the guest would be surprised after
   migration;
3. Other MSRs need no save/restore, since they are either error-
   related (and pointless to save/restore) or unified across all
   vMCE platforms.
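
As an aside on point 2: the save-record compatibility this relies on is
plain zero-extension. Old records carry only "caps"; this patch appends
the two per-bank MCi_CTL2 values and loads with
hvm_load_entry_zeroextend(), so short (pre-patch) records come back with
the new fields zeroed. A minimal standalone sketch of that behaviour,
with a hypothetical load_record() standing in for the real Xen helper:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Mirrors the extended struct hvm_vmce_vcpu from this patch. */
struct vmce_record {
    uint64_t caps;            /* present in old and new records */
    uint64_t mci_ctl2_bank0;  /* appended by this patch */
    uint64_t mci_ctl2_bank1;  /* appended by this patch */
};

/*
 * Stand-in for hvm_load_entry_zeroextend(): copy as many bytes as the
 * saved record provides and zero-fill the rest, so records written
 * before the extension load with both MCi_CTL2 values == 0.
 */
static int load_record(struct vmce_record *dst, const void *src, size_t len)
{
    if ( len > sizeof(*dst) )
        return -1;            /* record larger than this build knows */
    memset(dst, 0, sizeof(*dst));
    memcpy(dst, src, len);
    return 0;
}

int main(void)
{
    uint64_t old_record = 0x806;      /* caps only, pre-patch layout */
    struct vmce_record r;

    if ( load_record(&r, &old_record, sizeof(old_record)) == 0 )
        printf("caps=%#llx bank0=%#llx bank1=%#llx\n",
               (unsigned long long)r.caps,
               (unsigned long long)r.mci_ctl2_bank0,
               (unsigned long long)r.mci_ctl2_bank1);
    return 0;
}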

Signed-off-by: Liu, Jinsong <jinsong.liu@xxxxxxxxx>

- fix handling of partial data in XEN_DOMCTL_set_ext_vcpucontext
- fix adjustment of xen_domctl_ext_vcpucontext

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
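
The "partial data" fix noted above hinges on evc->size: a caller built
against the old ABI passes a structure that ends right after mcg_cap,
so the hypercall has to dispatch on the size it was handed. A cut-down
sketch of that dispatch (a hypothetical ext_ctx layout in place of the
real xen_domctl_ext_vcpucontext; the anonymous union mirrors the
__GNUC__ branch of the public header change below):

#include <stddef.h>
#include <stdint.h>

struct vmce_ctx {
    uint64_t caps;
    uint64_t mci_ctl2_bank0;
    uint64_t mci_ctl2_bank1;
};

/* Hypothetical cut-down ext_vcpucontext; old callers end at mcg_cap. */
struct ext_ctx {
    uint32_t size;            /* bytes the caller actually filled in */
    union {
        uint64_t mcg_cap;     /* old ABI: capabilities only */
        struct vmce_ctx vmce; /* new ABI: caps + per-bank MCi_CTL2 */
    };
};

static int restore_vmce(const struct vmce_ctx *ctxt)
{
    return ctxt->caps ? 0 : -1;   /* stand-in for vmce_restore_vcpu() */
}

static int set_ext_vcpucontext(const struct ext_ctx *evc)
{
    if ( evc->size >= offsetof(struct ext_ctx, vmce) + sizeof(evc->vmce) )
        return restore_vmce(&evc->vmce);       /* full new-style data */

    if ( evc->size >= offsetof(struct ext_ctx, mcg_cap) +
                      sizeof(evc->mcg_cap) )
    {
        /* Old-style caller: synthesize a context, banks zeroed. */
        struct vmce_ctx vmce = { .caps = evc->mcg_cap };

        return restore_vmce(&vmce);
    }

    return 0;    /* too short to carry any vMCE state at all */
}

int main(void)
{
    struct ext_ctx old = { .size = offsetof(struct ext_ctx, mcg_cap) +
                                   sizeof(old.mcg_cap) };

    old.mcg_cap = 0x806;
    return set_ext_vcpucontext(&old);  /* exercises the fallback path */
}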


diff -r 8f8fabafec86 -r 07d0d5b3a005 tools/misc/xen-hvmctx.c
--- a/tools/misc/xen-hvmctx.c   Wed Sep 26 12:05:10 2012 +0200
+++ b/tools/misc/xen-hvmctx.c   Wed Sep 26 12:05:55 2012 +0200
@@ -388,6 +388,8 @@ static void dump_vmce_vcpu(void)
     HVM_SAVE_TYPE(VMCE_VCPU) p;
     READ(p);
     printf("    VMCE_VCPU: caps %" PRIx64 "\n", p.caps);
+    printf("    VMCE_VCPU: bank0 mci_ctl2 %" PRIx64 "\n", p.mci_ctl2_bank0);
+    printf("    VMCE_VCPU: bank1 mci_ctl2 %" PRIx64 "\n", p.mci_ctl2_bank1);
 }
 
 int main(int argc, char **argv)
diff -r 8f8fabafec86 -r 07d0d5b3a005 xen/arch/x86/cpu/mcheck/vmce.c
--- a/xen/arch/x86/cpu/mcheck/vmce.c    Wed Sep 26 12:05:10 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/vmce.c    Wed Sep 26 12:05:55 2012 +0200
@@ -68,7 +68,7 @@ void vmce_init_vcpu(struct vcpu *v)
     spin_lock_init(&v->arch.vmce.lock);
 }
 
-int vmce_restore_vcpu(struct vcpu *v, uint64_t caps)
+int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt)
 {
     unsigned long guest_mcg_cap;
 
@@ -77,16 +77,20 @@ int vmce_restore_vcpu(struct vcpu *v, ui
     else
         guest_mcg_cap = AMD_GUEST_MCG_CAP;
 
-    if ( caps & ~guest_mcg_cap & ~MCG_CAP_COUNT & ~MCG_CTL_P )
+    if ( ctxt->caps & ~guest_mcg_cap & ~MCG_CAP_COUNT & ~MCG_CTL_P )
     {
         dprintk(XENLOG_G_ERR, "%s restore: unsupported MCA capabilities"
                 " %#" PRIx64 " for d%d:v%u (supported: %#Lx)\n",
-                is_hvm_vcpu(v) ? "HVM" : "PV", caps, v->domain->domain_id,
-                v->vcpu_id, guest_mcg_cap & ~MCG_CAP_COUNT);
+                is_hvm_vcpu(v) ? "HVM" : "PV", ctxt->caps,
+                v->domain->domain_id, v->vcpu_id,
+                guest_mcg_cap & ~MCG_CAP_COUNT);
         return -EPERM;
     }
 
-    v->arch.vmce.mcg_cap = caps;
+    v->arch.vmce.mcg_cap = ctxt->caps;
+    v->arch.vmce.bank[0].mci_ctl2 = ctxt->mci_ctl2_bank0;
+    v->arch.vmce.bank[1].mci_ctl2 = ctxt->mci_ctl2_bank1;
+
     return 0;
 }
 
@@ -291,7 +295,9 @@ static int vmce_save_vcpu_ctxt(struct do
 
     for_each_vcpu( d, v ) {
         struct hvm_vmce_vcpu ctxt = {
-            .caps = v->arch.vmce.mcg_cap
+            .caps = v->arch.vmce.mcg_cap,
+            .mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
+            .mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2
         };
 
         err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
@@ -316,9 +322,9 @@ static int vmce_load_vcpu_ctxt(struct do
         err = -EINVAL;
     }
     else
-        err = hvm_load_entry(VMCE_VCPU, h, &ctxt);
+        err = hvm_load_entry_zeroextend(VMCE_VCPU, h, &ctxt);
 
-    return err ?: vmce_restore_vcpu(v, ctxt.caps);
+    return err ?: vmce_restore_vcpu(v, &ctxt);
 }
 
 HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt,
diff -r 8f8fabafec86 -r 07d0d5b3a005 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Wed Sep 26 12:05:10 2012 +0200
+++ b/xen/arch/x86/domctl.c     Wed Sep 26 12:05:55 2012 +0200
@@ -1066,12 +1066,14 @@ long arch_do_domctl(
                 evc->syscall32_callback_eip    = 0;
                 evc->syscall32_disables_events = 0;
             }
-            evc->mcg_cap = v->arch.vmce.mcg_cap;
+            evc->vmce.caps = v->arch.vmce.mcg_cap;
+            evc->vmce.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
+            evc->vmce.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
         }
         else
         {
             ret = -EINVAL;
-            if ( evc->size < offsetof(typeof(*evc), mcg_cap) )
+            if ( evc->size < offsetof(typeof(*evc), vmce) )
                 goto ext_vcpucontext_out;
             if ( !is_hvm_domain(d) )
             {
@@ -1101,9 +1103,21 @@ long arch_do_domctl(
                  evc->syscall32_callback_eip )
                 goto ext_vcpucontext_out;
 
-            if ( evc->size >= offsetof(typeof(*evc), mcg_cap) +
-                              sizeof(evc->mcg_cap) )
-                ret = vmce_restore_vcpu(v, evc->mcg_cap);
+            BUILD_BUG_ON(offsetof(struct xen_domctl_ext_vcpucontext,
+                                  mcg_cap) !=
+                         offsetof(struct xen_domctl_ext_vcpucontext,
+                                  vmce.caps));
+            BUILD_BUG_ON(sizeof(evc->mcg_cap) != sizeof(evc->vmce.caps));
+            if ( evc->size >= offsetof(typeof(*evc), vmce) +
+                              sizeof(evc->vmce) )
+                ret = vmce_restore_vcpu(v, &evc->vmce);
+            else if ( evc->size >= offsetof(typeof(*evc), mcg_cap) +
+                                   sizeof(evc->mcg_cap) )
+            {
+                struct hvm_vmce_vcpu vmce = { .caps = evc->mcg_cap };
+
+                ret = vmce_restore_vcpu(v, &vmce);
+            }
         }
 
         ret = 0;
diff -r 8f8fabafec86 -r 07d0d5b3a005 xen/include/asm-x86/mce.h
--- a/xen/include/asm-x86/mce.h Wed Sep 26 12:05:10 2012 +0200
+++ b/xen/include/asm-x86/mce.h Wed Sep 26 12:05:55 2012 +0200
@@ -33,7 +33,7 @@ struct vmce {
 
 /* Guest vMCE MSRs virtualization */
 extern void vmce_init_vcpu(struct vcpu *);
-extern int vmce_restore_vcpu(struct vcpu *, uint64_t caps);
+extern int vmce_restore_vcpu(struct vcpu *, const struct hvm_vmce_vcpu *);
 extern int vmce_wrmsr(uint32_t msr, uint64_t val);
 extern int vmce_rdmsr(uint32_t msr, uint64_t *val);
 
diff -r 8f8fabafec86 -r 07d0d5b3a005 xen/include/public/arch-x86/hvm/save.h
--- a/xen/include/public/arch-x86/hvm/save.h    Wed Sep 26 12:05:10 2012 +0200
+++ b/xen/include/public/arch-x86/hvm/save.h    Wed Sep 26 12:05:55 2012 +0200
@@ -577,6 +577,8 @@ DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17,
 
 struct hvm_vmce_vcpu {
     uint64_t caps;
+    uint64_t mci_ctl2_bank0;
+    uint64_t mci_ctl2_bank1;
 };
 
 DECLARE_HVM_SAVE_TYPE(VMCE_VCPU, 18, struct hvm_vmce_vcpu);
diff -r 8f8fabafec86 -r 07d0d5b3a005 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Wed Sep 26 12:05:10 2012 +0200
+++ b/xen/include/public/domctl.h       Wed Sep 26 12:05:55 2012 +0200
@@ -32,6 +32,7 @@
 #error "domctl operations are intended for use by node control tools only"
 #endif
 
+#include <xen/hvm/save.h>
 #include "xen.h"
 #include "grant_table.h"
 
@@ -564,7 +565,14 @@ struct xen_domctl_ext_vcpucontext {
     uint16_t         sysenter_callback_cs;
     uint8_t          syscall32_disables_events;
     uint8_t          sysenter_disables_events;
-    uint64_aligned_t mcg_cap;
+#if defined(__GNUC__)
+    union {
+        uint64_aligned_t mcg_cap;
+        struct hvm_vmce_vcpu vmce;
+    };
+#else
+    struct hvm_vmce_vcpu vmce;
+#endif
 #endif
 };
 typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
