[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] Fix save/restore for HVM domains with viridian=1



# HG changeset patch
# User Paul Durrant <paul.durrant@xxxxxxxxxx>
# Date 1322235041 0
# Node ID 373bd877cac3a42482074a2d3393d0c78bef7e59
# Parent  67f70841e05853aba35bd97792f34a0569020639
Fix save/restore for HVM domains with viridian=1

xc_domain_save/restore currently pay no attention to
HVM_PARAM_VIRIDIAN which results in an HVM domain running a recent
version on Windows (post-Vista) locking up on a domain restore due to
EOIs (done via a viridian MSR write) being silently dropped.  This
patch adds an extra save entry for the viridian parameter and also
adds code in the viridian kernel module to catch attempted use of
viridian functionality when the HVM parameter has not been set.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---


diff -r 67f70841e058 -r 373bd877cac3 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c   Fri Nov 25 13:31:58 2011 +0000
+++ b/tools/libxc/xc_domain_restore.c   Fri Nov 25 15:30:41 2011 +0000
@@ -675,6 +675,7 @@
     uint64_t vm86_tss;
     uint64_t console_pfn;
     uint64_t acpi_ioport_location;
+    uint64_t viridian;
 } pagebuf_t;
 
 static int pagebuf_init(pagebuf_t* buf)
@@ -809,6 +810,16 @@
         }
         return pagebuf_get_one(xch, ctx, buf, fd, dom);
 
+    case XC_SAVE_ID_HVM_VIRIDIAN:
+        /* Skip padding 4 bytes then read the viridian flag. */
+        if ( RDEXACT(fd, &buf->viridian, sizeof(uint32_t)) ||
+             RDEXACT(fd, &buf->viridian, sizeof(uint64_t)) )
+        {
+            PERROR("error reading the viridian flag");
+            return -1;
+        }
+        return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
     default:
         if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
             ERROR("Max batch size exceeded (%d). Giving up.", count);
@@ -1440,6 +1451,9 @@
             fcntl(io_fd, F_SETFL, orig_io_fd_flags | O_NONBLOCK);
     }
 
+    if (pagebuf.viridian != 0)
+        xc_set_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN, 1);
+
     if (pagebuf.acpi_ioport_location == 1) {
         DBGPRINTF("Use new firmware ioport from the checkpoint\n");
         xc_set_hvm_param(xch, dom, HVM_PARAM_ACPI_IOPORTS_LOCATION, 1);
diff -r 67f70841e058 -r 373bd877cac3 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c      Fri Nov 25 13:31:58 2011 +0000
+++ b/tools/libxc/xc_domain_save.c      Fri Nov 25 15:30:41 2011 +0000
@@ -1506,6 +1506,18 @@
             PERROR("Error when writing the firmware ioport version");
             goto out;
         }
+
+        chunk.id = XC_SAVE_ID_HVM_VIRIDIAN;
+        chunk.data = 0;
+        xc_get_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN,
+                         (unsigned long *)&chunk.data);
+
+        if ( (chunk.data != 0) &&
+             wrexact(io_fd, &chunk, sizeof(chunk)) )
+        {
+            PERROR("Error when writing the viridian flag");
+            goto out;
+        }
     }
 
     if ( !callbacks->checkpoint )
diff -r 67f70841e058 -r 373bd877cac3 tools/libxc/xg_save_restore.h
--- a/tools/libxc/xg_save_restore.h     Fri Nov 25 13:31:58 2011 +0000
+++ b/tools/libxc/xg_save_restore.h     Fri Nov 25 15:30:41 2011 +0000
@@ -134,6 +134,7 @@
 #define XC_SAVE_ID_HVM_CONSOLE_PFN    -8 /* (HVM-only) */
 #define XC_SAVE_ID_LAST_CHECKPOINT    -9 /* Commit to restoring after 
completion of current iteration. */
 #define XC_SAVE_ID_HVM_ACPI_IOPORTS_LOCATION -10
+#define XC_SAVE_ID_HVM_VIRIDIAN       -11
 
 /*
 ** We process save/restore/migrate in batches of pages; the below
diff -r 67f70841e058 -r 373bd877cac3 xen/arch/x86/hvm/viridian.c
--- a/xen/arch/x86/hvm/viridian.c       Fri Nov 25 13:31:58 2011 +0000
+++ b/xen/arch/x86/hvm/viridian.c       Fri Nov 25 15:30:41 2011 +0000
@@ -206,8 +206,11 @@
     struct vcpu *v = current;
     struct domain *d = v->domain;
 
-    if ( !is_viridian_domain(d) )
+    if ( !is_viridian_domain(d) ) {
+        gdprintk(XENLOG_WARNING, "%s: %d not a viridian domain\n", __func__,
+                 d->domain_id);
         return 0;
+    }
 
     switch ( idx )
     {
@@ -271,8 +274,11 @@
     struct vcpu *v = current;
     struct domain *d = v->domain;
     
-    if ( !is_viridian_domain(d) )
+    if ( !is_viridian_domain(d) ) {
+        gdprintk(XENLOG_WARNING, "%s: %d not a viridian domain\n", __func__,
+                 d->domain_id);
         return 0;
+    }
 
     switch ( idx )
     {
@@ -411,6 +417,8 @@
     if ( hvm_load_entry(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
         return -EINVAL;
 
+    ASSERT(is_viridian_domain(d));
+
     d->arch.hvm_domain.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa;
     d->arch.hvm_domain.viridian.guest_os_id.raw   = ctxt.guest_os_id;
 
@@ -455,6 +463,8 @@
     if ( hvm_load_entry(VIRIDIAN_VCPU, h, &ctxt) != 0 )
         return -EINVAL;
 
+    ASSERT(is_viridian_domain(d));
+
     v->arch.hvm_vcpu.viridian.apic_assist.raw = ctxt.apic_assist;
 
     return 0;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.