
[xen master] tools/libxc: Restore CPUID/MSR data found in the migration stream



commit 1a6be420e04441bbf5f03968ab43a7908167fcb5
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Dec 20 19:38:26 2019 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Fri May 29 17:33:03 2020 +0100

    tools/libxc: Restore CPUID/MSR data found in the migration stream
    
    With all other pieces in place, it is now safe to restore the CPUID and MSR
    data in the migration stream, rather than discarding them and using the
    higher level toolstack's compatibility logic.
    
    While this is a small patch, it has large implications for migrated/resumed
    domains.  Most obviously, the CPU family/model/stepping data,
    cache/tlb/etc. will no longer change behind the guest's back.
    
    Another change is the interpretation of the Xend cpuid strings.  The 'k'
    option is not a sensible thing to have ever supported, and 's' is how the
    stream will end up behaving.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
---
 tools/libxc/xc_cpuid_x86.c     |  8 ++++----
 tools/libxc/xc_sr_common_x86.c | 26 ++++++++++++++++++++++++--
 2 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/tools/libxc/xc_cpuid_x86.c b/tools/libxc/xc_cpuid_x86.c
index f045b03223..89d2ecdad2 100644
--- a/tools/libxc/xc_cpuid_x86.c
+++ b/tools/libxc/xc_cpuid_x86.c
@@ -273,10 +273,9 @@ int xc_set_domain_cpu_policy(xc_interface *xch, uint32_t domid,
  *   '0' -> force to 0
  *   'x' -> we don't care (use default)
  *   'k' -> pass through host value
- *   's' -> pass through the first time and then keep the same value
- *          across save/restore and migration.
+ *   's' -> legacy alias for 'k'
  *
- * For 's' and 'x' the configuration is overwritten with the value applied.
+ * In all cases, the returned string consists of just '0' and '1'.
  */
 int xc_cpuid_set(
     xc_interface *xch, uint32_t domid, const unsigned int *input,
@@ -402,7 +401,8 @@ int xc_cpuid_set(
                 clear_feature(31 - j, regs[i]);
 
             config_transformed[i][j] = config[i][j];
-            if ( config[i][j] == 's' )
+            /* All non 0/1 values get overwritten. */
+            if ( (config[i][j] & ~1) != '0' )
                 config_transformed[i][j] = '0' + val;
         }
     }
diff --git a/tools/libxc/xc_sr_common_x86.c b/tools/libxc/xc_sr_common_x86.c
index a849891634..77ea044a74 100644
--- a/tools/libxc/xc_sr_common_x86.c
+++ b/tools/libxc/xc_sr_common_x86.c
@@ -134,8 +134,30 @@ int handle_x86_msr_policy(struct xc_sr_context *ctx, struct xc_sr_record *rec)
 
 int x86_static_data_complete(struct xc_sr_context *ctx, unsigned int *missing)
 {
-    /* TODO: Become conditional on there being no data in the stream. */
-    *missing = XGR_SDD_MISSING_MSR | XGR_SDD_MISSING_CPUID;
+    xc_interface *xch = ctx->xch;
+    uint32_t nr_leaves = 0, nr_msrs = 0;
+    uint32_t err_l = ~0, err_s = ~0, err_m = ~0;
+
+    if ( ctx->x86.restore.cpuid.ptr )
+        nr_leaves = ctx->x86.restore.cpuid.size / sizeof(xen_cpuid_leaf_t);
+    else
+        *missing |= XGR_SDD_MISSING_CPUID;
+
+    if ( ctx->x86.restore.msr.ptr )
+        nr_msrs = ctx->x86.restore.msr.size / sizeof(xen_msr_entry_t);
+    else
+        *missing |= XGR_SDD_MISSING_MSR;
+
+    if ( (nr_leaves || nr_msrs) &&
+         xc_set_domain_cpu_policy(xch, ctx->domid,
+                                  nr_leaves, ctx->x86.restore.cpuid.ptr,
+                                  nr_msrs,   ctx->x86.restore.msr.ptr,
+                                  &err_l, &err_s, &err_m) )
+    {
+        PERROR("Failed to set CPUID policy: leaf %08x, subleaf %08x, msr %08x",
+               err_l, err_s, err_m);
+        return -1;
+    }
 
     return 0;
 }
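
For reference, the character test in the xc_cpuid_set() hunk relies on
ASCII '0' (0x30) and '1' (0x31) differing only in bit 0, so masking a
character with ~1 maps exactly those two characters to '0'.  A minimal
standalone sketch (not part of the patch above) demonstrating the test:

    /*
     * Illustration of the "(c & ~1) != '0'" check from xc_cpuid_set().
     * Clearing bit 0 maps both '0' and '1' to 0x30 ('0'); any other
     * option character ('x', 'k', 's', ...) survives the mask as
     * something other than '0' and is therefore overwritten.
     */
    #include <stdio.h>

    int main(void)
    {
        const char config[] = "01xks";

        for ( unsigned int i = 0; config[i]; ++i )
        {
            char c = config[i];

            printf("'%c' -> %s\n", c,
                   (c & ~1) != '0' ? "overwritten with applied value"
                                   : "kept as-is");
        }

        return 0;
    }

With the patch applied, every non-'0'/'1' option is rewritten to the
digit reflecting the applied policy, which is why the updated comment
can state that the returned string consists of just '0' and '1'.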
--
generated by git-patchbot for /home/xen/git/xen.git#master