[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 04/12] libxc/migration: Adjust layout of struct xc_sr_context



We are shortly going to want to introduce some common x86 fields, so having
x86_pv and x86_hvm as the top level objects is a problem.  Insert a
surrounding struct x86 and drop the x86 prefix from the pv/hvm objects.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Ian Jackson <Ian.Jackson@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>

This is much more easily reviewed with git-diff's --ignore-all-space option,
which highlights the relevant struct difference.

  diff --git a/tools/libxc/xc_sr_common.h b/tools/libxc/xc_sr_common.h
  index 4963b30c4b..2d02e80ee3 100644
  --- a/tools/libxc/xc_sr_common.h
  +++ b/tools/libxc/xc_sr_common.h
  @@ -281,6 +281,8 @@ struct xc_sr_context

       union /* Guest-arch specific data. */
       {
  +        struct /* x86 */
  +        {
               struct /* x86 PV guest. */
               {
                   /* 4 or 8; 32 or 64 bit domain */
  @@ -332,7 +334,7 @@ struct xc_sr_context
                           unsigned int nr_vcpus;
                       } restore;
                   };
  -        } x86_pv;
  +            } pv;

               struct /* x86 HVM guest. */
               {
  @@ -351,7 +353,9 @@ struct xc_sr_context
                           struct xc_sr_blob context;
                       } restore;
                   };
  -        } x86_hvm;
  +            } hvm;
  +
  +        } x86;
       };
   };
---
 tools/libxc/xc_sr_common.h          | 132 ++++++++++-----------
 tools/libxc/xc_sr_common_x86_pv.c   |  50 ++++----
 tools/libxc/xc_sr_common_x86_pv.h   |   4 +-
 tools/libxc/xc_sr_restore_x86_hvm.c |  12 +-
 tools/libxc/xc_sr_restore_x86_pv.c  | 224 ++++++++++++++++++------------------
 tools/libxc/xc_sr_save_x86_hvm.c    |   4 +-
 tools/libxc/xc_sr_save_x86_pv.c     | 158 ++++++++++++-------------
 7 files changed, 294 insertions(+), 290 deletions(-)

diff --git a/tools/libxc/xc_sr_common.h b/tools/libxc/xc_sr_common.h
index 4db63a63b2..0289c01e13 100644
--- a/tools/libxc/xc_sr_common.h
+++ b/tools/libxc/xc_sr_common.h
@@ -287,77 +287,81 @@ struct xc_sr_context
 
     union /* Guest-arch specific data. */
     {
-        struct /* x86 PV guest. */
+        struct /* x86 */
         {
-            /* 4 or 8; 32 or 64 bit domain */
-            unsigned int width;
-            /* 3 or 4 pagetable levels */
-            unsigned int levels;
-
-            /* Maximum Xen frame */
-            xen_pfn_t max_mfn;
-            /* Read-only machine to phys map */
-            xen_pfn_t *m2p;
-            /* first mfn of the compat m2p (Only needed for 32bit PV guests) */
-            xen_pfn_t compat_m2p_mfn0;
-            /* Number of m2p frames mapped */
-            unsigned long nr_m2p_frames;
-
-            /* Maximum guest frame */
-            xen_pfn_t max_pfn;
-
-            /* Number of frames making up the p2m */
-            unsigned int p2m_frames;
-            /* Guest's phys to machine map.  Mapped read-only (save) or
-             * allocated locally (restore).  Uses guest unsigned longs. */
-            void *p2m;
-            /* The guest pfns containing the p2m leaves */
-            xen_pfn_t *p2m_pfns;
-
-            /* Read-only mapping of guests shared info page */
-            shared_info_any_t *shinfo;
-
-            /* p2m generation count for verifying validity of local p2m. */
-            uint64_t p2m_generation;
-
-            union
+            struct /* x86 PV guest. */
             {
-                struct
+                /* 4 or 8; 32 or 64 bit domain */
+                unsigned int width;
+                /* 3 or 4 pagetable levels */
+                unsigned int levels;
+
+                /* Maximum Xen frame */
+                xen_pfn_t max_mfn;
+                /* Read-only machine to phys map */
+                xen_pfn_t *m2p;
+                /* first mfn of the compat m2p (Only needed for 32bit PV guests) */
+                xen_pfn_t compat_m2p_mfn0;
+                /* Number of m2p frames mapped */
+                unsigned long nr_m2p_frames;
+
+                /* Maximum guest frame */
+                xen_pfn_t max_pfn;
+
+                /* Number of frames making up the p2m */
+                unsigned int p2m_frames;
+                /* Guest's phys to machine map.  Mapped read-only (save) or
+                 * allocated locally (restore).  Uses guest unsigned longs. */
+                void *p2m;
+                /* The guest pfns containing the p2m leaves */
+                xen_pfn_t *p2m_pfns;
+
+                /* Read-only mapping of guests shared info page */
+                shared_info_any_t *shinfo;
+
+                /* p2m generation count for verifying validity of local p2m. */
+                uint64_t p2m_generation;
+
+                union
                 {
-                    /* State machine for the order of received records. */
-                    bool seen_pv_info;
-
-                    /* Types for each page (bounded by max_pfn). */
-                    uint32_t *pfn_types;
-
-                    /* x86 PV per-vcpu storage structure for blobs. */
-                    struct xc_sr_x86_pv_restore_vcpu
+                    struct
                     {
-                        struct xc_sr_blob basic, extd, xsave, msr;
-                    } *vcpus;
-                    unsigned int nr_vcpus;
-                } restore;
-            };
-        } x86_pv;
-
-        struct /* x86 HVM guest. */
-        {
-            union
+                        /* State machine for the order of received records. */
+                        bool seen_pv_info;
+
+                        /* Types for each page (bounded by max_pfn). */
+                        uint32_t *pfn_types;
+
+                        /* x86 PV per-vcpu storage structure for blobs. */
+                        struct xc_sr_x86_pv_restore_vcpu
+                        {
+                            struct xc_sr_blob basic, extd, xsave, msr;
+                        } *vcpus;
+                        unsigned int nr_vcpus;
+                    } restore;
+                };
+            } pv;
+
+            struct /* x86 HVM guest. */
             {
-                struct
+                union
                 {
-                    /* Whether qemu enabled logdirty mode, and we should
-                     * disable on cleanup. */
-                    bool qemu_enabled_logdirty;
-                } save;
+                    struct
+                    {
+                        /* Whether qemu enabled logdirty mode, and we should
+                         * disable on cleanup. */
+                        bool qemu_enabled_logdirty;
+                    } save;
 
-                struct
-                {
-                    /* HVM context blob. */
-                    struct xc_sr_blob context;
-                } restore;
-            };
-        } x86_hvm;
+                    struct
+                    {
+                        /* HVM context blob. */
+                        struct xc_sr_blob context;
+                    } restore;
+                };
+            } hvm;
+
+        } x86;
     };
 };
 
diff --git a/tools/libxc/xc_sr_common_x86_pv.c b/tools/libxc/xc_sr_common_x86_pv.c
index ec433fad70..d3d425cb82 100644
--- a/tools/libxc/xc_sr_common_x86_pv.c
+++ b/tools/libxc/xc_sr_common_x86_pv.c
@@ -4,16 +4,16 @@
 
 xen_pfn_t mfn_to_pfn(struct xc_sr_context *ctx, xen_pfn_t mfn)
 {
-    assert(mfn <= ctx->x86_pv.max_mfn);
-    return ctx->x86_pv.m2p[mfn];
+    assert(mfn <= ctx->x86.pv.max_mfn);
+    return ctx->x86.pv.m2p[mfn];
 }
 
 bool mfn_in_pseudophysmap(struct xc_sr_context *ctx, xen_pfn_t mfn)
 {
-    return ((mfn <= ctx->x86_pv.max_mfn) &&
-            (mfn_to_pfn(ctx, mfn) <= ctx->x86_pv.max_pfn) &&
-            (xc_pfn_to_mfn(mfn_to_pfn(ctx, mfn), ctx->x86_pv.p2m,
-                           ctx->x86_pv.width) == mfn));
+    return ((mfn <= ctx->x86.pv.max_mfn) &&
+            (mfn_to_pfn(ctx, mfn) <= ctx->x86.pv.max_pfn) &&
+            (xc_pfn_to_mfn(mfn_to_pfn(ctx, mfn), ctx->x86.pv.p2m,
+                           ctx->x86.pv.width) == mfn));
 }
 
 void dump_bad_pseudophysmap_entry(struct xc_sr_context *ctx, xen_pfn_t mfn)
@@ -21,23 +21,23 @@ void dump_bad_pseudophysmap_entry(struct xc_sr_context *ctx, xen_pfn_t mfn)
     xc_interface *xch = ctx->xch;
     xen_pfn_t pfn = ~0UL;
 
-    ERROR("mfn %#lx, max %#lx", mfn, ctx->x86_pv.max_mfn);
+    ERROR("mfn %#lx, max %#lx", mfn, ctx->x86.pv.max_mfn);
 
-    if ( (mfn != ~0UL) && (mfn <= ctx->x86_pv.max_mfn) )
+    if ( (mfn != ~0UL) && (mfn <= ctx->x86.pv.max_mfn) )
     {
-        pfn = ctx->x86_pv.m2p[mfn];
+        pfn = ctx->x86.pv.m2p[mfn];
         ERROR("  m2p[%#lx] = %#lx, max_pfn %#lx",
-              mfn, pfn, ctx->x86_pv.max_pfn);
+              mfn, pfn, ctx->x86.pv.max_pfn);
     }
 
-    if ( (pfn != ~0UL) && (pfn <= ctx->x86_pv.max_pfn) )
+    if ( (pfn != ~0UL) && (pfn <= ctx->x86.pv.max_pfn) )
         ERROR("  p2m[%#lx] = %#lx",
-              pfn, xc_pfn_to_mfn(pfn, ctx->x86_pv.p2m, ctx->x86_pv.width));
+              pfn, xc_pfn_to_mfn(pfn, ctx->x86.pv.p2m, ctx->x86.pv.width));
 }
 
 xen_pfn_t cr3_to_mfn(struct xc_sr_context *ctx, uint64_t cr3)
 {
-    if ( ctx->x86_pv.width == 8 )
+    if ( ctx->x86.pv.width == 8 )
         return cr3 >> 12;
     else
     {
@@ -53,7 +53,7 @@ uint64_t mfn_to_cr3(struct xc_sr_context *ctx, xen_pfn_t _mfn)
 {
     uint64_t mfn = _mfn;
 
-    if ( ctx->x86_pv.width == 8 )
+    if ( ctx->x86.pv.width == 8 )
         return mfn << 12;
     else
     {
@@ -86,8 +86,8 @@ int x86_pv_domain_info(struct xc_sr_context *ctx)
         ERROR("Invalid guest width %d.  Expected 32 or 64", guest_width * 8);
         return -1;
     }
-    ctx->x86_pv.width = guest_width;
-    ctx->x86_pv.levels = guest_levels;
+    ctx->x86.pv.width = guest_width;
+    ctx->x86.pv.levels = guest_levels;
 
     DPRINTF("%d bits, %d levels", guest_width * 8, guest_levels);
 
@@ -108,9 +108,9 @@ int x86_pv_map_m2p(struct xc_sr_context *ctx)
         goto err;
     }
 
-    ctx->x86_pv.max_mfn = max_page;
-    m2p_size   = M2P_SIZE(ctx->x86_pv.max_mfn);
-    m2p_chunks = M2P_CHUNKS(ctx->x86_pv.max_mfn);
+    ctx->x86.pv.max_mfn = max_page;
+    m2p_size   = M2P_SIZE(ctx->x86.pv.max_mfn);
+    m2p_chunks = M2P_CHUNKS(ctx->x86.pv.max_mfn);
 
     extents_start = malloc(m2p_chunks * sizeof(xen_pfn_t));
     if ( !extents_start )
@@ -137,27 +137,27 @@ int x86_pv_map_m2p(struct xc_sr_context *ctx)
     for ( i = 0; i < m2p_chunks; ++i )
         entries[i].mfn = extents_start[i];
 
-    ctx->x86_pv.m2p = xc_map_foreign_ranges(
+    ctx->x86.pv.m2p = xc_map_foreign_ranges(
         xch, DOMID_XEN, m2p_size, PROT_READ,
         M2P_CHUNK_SIZE, entries, m2p_chunks);
 
-    if ( !ctx->x86_pv.m2p )
+    if ( !ctx->x86.pv.m2p )
     {
         PERROR("Failed to mmap() m2p ranges");
         goto err;
     }
 
-    ctx->x86_pv.nr_m2p_frames = (M2P_CHUNK_SIZE >> PAGE_SHIFT) * m2p_chunks;
+    ctx->x86.pv.nr_m2p_frames = (M2P_CHUNK_SIZE >> PAGE_SHIFT) * m2p_chunks;
 
 #ifdef __i386__
     /* 32 bit toolstacks automatically get the compat m2p */
-    ctx->x86_pv.compat_m2p_mfn0 = entries[0].mfn;
+    ctx->x86.pv.compat_m2p_mfn0 = entries[0].mfn;
 #else
     /* 64 bit toolstacks need to ask Xen specially for it */
     {
         struct xen_machphys_mfn_list xmml = {
             .max_extents = 1,
-            .extent_start = { &ctx->x86_pv.compat_m2p_mfn0 },
+            .extent_start = { &ctx->x86.pv.compat_m2p_mfn0 },
         };
 
         rc = do_memory_op(xch, XENMEM_machphys_compat_mfn_list,
@@ -173,7 +173,7 @@ int x86_pv_map_m2p(struct xc_sr_context *ctx)
 
     /* All Done */
     rc = 0;
-    DPRINTF("max_mfn %#lx", ctx->x86_pv.max_mfn);
+    DPRINTF("max_mfn %#lx", ctx->x86.pv.max_mfn);
 
  err:
     free(entries);
diff --git a/tools/libxc/xc_sr_common_x86_pv.h b/tools/libxc/xc_sr_common_x86_pv.h
index f80c75349a..2ed03309af 100644
--- a/tools/libxc/xc_sr_common_x86_pv.h
+++ b/tools/libxc/xc_sr_common_x86_pv.h
@@ -73,7 +73,7 @@ static inline uint64_t merge_pte(uint64_t pte, xen_pfn_t mfn)
 /*
  * Get current domain information.
  *
- * Fills ctx->x86_pv
+ * Fills ctx->x86.pv
  * - .width
  * - .levels
  * - .fpp
@@ -89,7 +89,7 @@ int x86_pv_domain_info(struct xc_sr_context *ctx);
 /*
  * Maps the Xen M2P.
  *
- * Fills ctx->x86_pv.
+ * Fills ctx->x86.pv.
  * - .max_mfn
  * - .m2p
  *
diff --git a/tools/libxc/xc_sr_restore_x86_hvm.c b/tools/libxc/xc_sr_restore_x86_hvm.c
index fe7be9bde6..3f78248f32 100644
--- a/tools/libxc/xc_sr_restore_x86_hvm.c
+++ b/tools/libxc/xc_sr_restore_x86_hvm.c
@@ -10,7 +10,7 @@ static int handle_hvm_context(struct xc_sr_context *ctx,
                               struct xc_sr_record *rec)
 {
     xc_interface *xch = ctx->xch;
-    int rc = update_blob(&ctx->x86_hvm.restore.context, rec->data, rec->length);
+    int rc = update_blob(&ctx->x86.hvm.restore.context, rec->data, rec->length);
 
     if ( rc )
         ERROR("Unable to allocate %u bytes for hvm context", rec->length);
@@ -129,14 +129,14 @@ static int x86_hvm_setup(struct xc_sr_context *ctx)
 
     if ( ctx->restore.guest_type != DHDR_TYPE_X86_HVM )
     {
-        ERROR("Unable to restore %s domain into an x86_hvm domain",
+        ERROR("Unable to restore %s domain into an x86 HVM domain",
               dhdr_type_to_str(ctx->restore.guest_type));
         return -1;
     }
 
     if ( ctx->restore.guest_page_size != PAGE_SIZE )
     {
-        ERROR("Invalid page size %u for x86_hvm domains",
+        ERROR("Invalid page size %u for x86 HVM domains",
               ctx->restore.guest_page_size);
         return -1;
     }
@@ -201,8 +201,8 @@ static int x86_hvm_stream_complete(struct xc_sr_context *ctx)
     }
 
     rc = xc_domain_hvm_setcontext(xch, ctx->domid,
-                                  ctx->x86_hvm.restore.context.ptr,
-                                  ctx->x86_hvm.restore.context.size);
+                                  ctx->x86.hvm.restore.context.ptr,
+                                  ctx->x86.hvm.restore.context.size);
     if ( rc < 0 )
     {
         PERROR("Unable to restore HVM context");
@@ -225,7 +225,7 @@ static int x86_hvm_stream_complete(struct xc_sr_context *ctx)
 
 static int x86_hvm_cleanup(struct xc_sr_context *ctx)
 {
-    free(ctx->x86_hvm.restore.context.ptr);
+    free(ctx->x86.hvm.restore.context.ptr);
 
     return 0;
 }
diff --git a/tools/libxc/xc_sr_restore_x86_pv.c b/tools/libxc/xc_sr_restore_x86_pv.c
index 0ec506632a..8f61a5e8b9 100644
--- a/tools/libxc/xc_sr_restore_x86_pv.c
+++ b/tools/libxc/xc_sr_restore_x86_pv.c
@@ -4,9 +4,9 @@
 
 static xen_pfn_t pfn_to_mfn(const struct xc_sr_context *ctx, xen_pfn_t pfn)
 {
-    assert(pfn <= ctx->x86_pv.max_pfn);
+    assert(pfn <= ctx->x86.pv.max_pfn);
 
-    return xc_pfn_to_mfn(pfn, ctx->x86_pv.p2m, ctx->x86_pv.width);
+    return xc_pfn_to_mfn(pfn, ctx->x86.pv.p2m, ctx->x86.pv.width);
 }
 
 /*
@@ -18,8 +18,8 @@ static xen_pfn_t pfn_to_mfn(const struct xc_sr_context *ctx, xen_pfn_t pfn)
 static int expand_p2m(struct xc_sr_context *ctx, unsigned long max_pfn)
 {
     xc_interface *xch = ctx->xch;
-    unsigned long old_max = ctx->x86_pv.max_pfn, i;
-    unsigned int fpp = PAGE_SIZE / ctx->x86_pv.width;
+    unsigned long old_max = ctx->x86.pv.max_pfn, i;
+    unsigned int fpp = PAGE_SIZE / ctx->x86.pv.width;
     unsigned long end_frame = (max_pfn / fpp) + 1;
     unsigned long old_end_frame = (old_max / fpp) + 1;
     xen_pfn_t *p2m = NULL, *p2m_pfns = NULL;
@@ -28,35 +28,35 @@ static int expand_p2m(struct xc_sr_context *ctx, unsigned long max_pfn)
 
     assert(max_pfn > old_max);
 
-    p2msz = (max_pfn + 1) * ctx->x86_pv.width;
-    p2m = realloc(ctx->x86_pv.p2m, p2msz);
+    p2msz = (max_pfn + 1) * ctx->x86.pv.width;
+    p2m = realloc(ctx->x86.pv.p2m, p2msz);
     if ( !p2m )
     {
         ERROR("Failed to (re)alloc %zu bytes for p2m", p2msz);
         return -1;
     }
-    ctx->x86_pv.p2m = p2m;
+    ctx->x86.pv.p2m = p2m;
 
     pfn_typesz = (max_pfn + 1) * sizeof(*pfn_types);
-    pfn_types = realloc(ctx->x86_pv.restore.pfn_types, pfn_typesz);
+    pfn_types = realloc(ctx->x86.pv.restore.pfn_types, pfn_typesz);
     if ( !pfn_types )
     {
         ERROR("Failed to (re)alloc %zu bytes for pfn_types", pfn_typesz);
         return -1;
     }
-    ctx->x86_pv.restore.pfn_types = pfn_types;
+    ctx->x86.pv.restore.pfn_types = pfn_types;
 
     p2m_pfnsz = (end_frame + 1) * sizeof(*p2m_pfns);
-    p2m_pfns = realloc(ctx->x86_pv.p2m_pfns, p2m_pfnsz);
+    p2m_pfns = realloc(ctx->x86.pv.p2m_pfns, p2m_pfnsz);
     if ( !p2m_pfns )
     {
         ERROR("Failed to (re)alloc %zu bytes for p2m frame list", p2m_pfnsz);
         return -1;
     }
-    ctx->x86_pv.p2m_frames = end_frame;
-    ctx->x86_pv.p2m_pfns = p2m_pfns;
+    ctx->x86.pv.p2m_frames = end_frame;
+    ctx->x86.pv.p2m_pfns = p2m_pfns;
 
-    ctx->x86_pv.max_pfn = max_pfn;
+    ctx->x86.pv.max_pfn = max_pfn;
     for ( i = (old_max ? old_max + 1 : 0); i <= max_pfn; ++i )
     {
         ctx->restore.ops.set_gfn(ctx, i, INVALID_MFN);
@@ -64,7 +64,7 @@ static int expand_p2m(struct xc_sr_context *ctx, unsigned long max_pfn)
     }
 
     for ( i = (old_end_frame ? old_end_frame + 1 : 0); i <= end_frame; ++i )
-        ctx->x86_pv.p2m_pfns[i] = INVALID_MFN;
+        ctx->x86.pv.p2m_pfns[i] = INVALID_MFN;
 
     DPRINTF("Changed max_pfn from %#lx to %#lx", old_max, max_pfn);
     return 0;
@@ -79,13 +79,13 @@ static int pin_pagetables(struct xc_sr_context *ctx)
     unsigned long i, nr_pins;
     struct mmuext_op pin[MAX_PIN_BATCH];
 
-    for ( i = nr_pins = 0; i <= ctx->x86_pv.max_pfn; ++i )
+    for ( i = nr_pins = 0; i <= ctx->x86.pv.max_pfn; ++i )
     {
-        if ( (ctx->x86_pv.restore.pfn_types[i] &
+        if ( (ctx->x86.pv.restore.pfn_types[i] &
               XEN_DOMCTL_PFINFO_LPINTAB) == 0 )
             continue;
 
-        switch ( (ctx->x86_pv.restore.pfn_types[i] &
+        switch ( (ctx->x86.pv.restore.pfn_types[i] &
                   XEN_DOMCTL_PFINFO_LTABTYPE_MASK) )
         {
         case XEN_DOMCTL_PFINFO_L1TAB:
@@ -138,18 +138,18 @@ static int process_start_info(struct xc_sr_context *ctx,
     start_info_any_t *guest_start_info = NULL;
     int rc = -1;
 
-    pfn = GET_FIELD(vcpu, user_regs.edx, ctx->x86_pv.width);
+    pfn = GET_FIELD(vcpu, user_regs.edx, ctx->x86.pv.width);
 
-    if ( pfn > ctx->x86_pv.max_pfn )
+    if ( pfn > ctx->x86.pv.max_pfn )
     {
         ERROR("Start Info pfn %#lx out of range", pfn);
         goto err;
     }
 
-    if ( ctx->x86_pv.restore.pfn_types[pfn] != XEN_DOMCTL_PFINFO_NOTAB )
+    if ( ctx->x86.pv.restore.pfn_types[pfn] != XEN_DOMCTL_PFINFO_NOTAB )
     {
         ERROR("Start Info pfn %#lx has bad type %u", pfn,
-              (ctx->x86_pv.restore.pfn_types[pfn] >>
+              (ctx->x86.pv.restore.pfn_types[pfn] >>
                XEN_DOMCTL_PFINFO_LTAB_SHIFT));
         goto err;
     }
@@ -162,7 +162,7 @@ static int process_start_info(struct xc_sr_context *ctx,
         goto err;
     }
 
-    SET_FIELD(vcpu, user_regs.edx, mfn, ctx->x86_pv.width);
+    SET_FIELD(vcpu, user_regs.edx, mfn, ctx->x86.pv.width);
     guest_start_info = xc_map_foreign_range(
         xch, ctx->domid, PAGE_SIZE, PROT_READ | PROT_WRITE, mfn);
     if ( !guest_start_info )
@@ -172,8 +172,8 @@ static int process_start_info(struct xc_sr_context *ctx,
     }
 
     /* Deal with xenstore stuff */
-    pfn = GET_FIELD(guest_start_info, store_mfn, ctx->x86_pv.width);
-    if ( pfn > ctx->x86_pv.max_pfn )
+    pfn = GET_FIELD(guest_start_info, store_mfn, ctx->x86.pv.width);
+    if ( pfn > ctx->x86.pv.max_pfn )
     {
         ERROR("XenStore pfn %#lx out of range", pfn);
         goto err;
@@ -188,13 +188,13 @@ static int process_start_info(struct xc_sr_context *ctx,
     }
 
     ctx->restore.xenstore_gfn = mfn;
-    SET_FIELD(guest_start_info, store_mfn, mfn, ctx->x86_pv.width);
+    SET_FIELD(guest_start_info, store_mfn, mfn, ctx->x86.pv.width);
     SET_FIELD(guest_start_info, store_evtchn,
-              ctx->restore.xenstore_evtchn, ctx->x86_pv.width);
+              ctx->restore.xenstore_evtchn, ctx->x86.pv.width);
 
     /* Deal with console stuff */
-    pfn = GET_FIELD(guest_start_info, console.domU.mfn, ctx->x86_pv.width);
-    if ( pfn > ctx->x86_pv.max_pfn )
+    pfn = GET_FIELD(guest_start_info, console.domU.mfn, ctx->x86.pv.width);
+    if ( pfn > ctx->x86.pv.max_pfn )
     {
         ERROR("Console pfn %#lx out of range", pfn);
         goto err;
@@ -209,16 +209,16 @@ static int process_start_info(struct xc_sr_context *ctx,
     }
 
     ctx->restore.console_gfn = mfn;
-    SET_FIELD(guest_start_info, console.domU.mfn, mfn, ctx->x86_pv.width);
+    SET_FIELD(guest_start_info, console.domU.mfn, mfn, ctx->x86.pv.width);
     SET_FIELD(guest_start_info, console.domU.evtchn,
-              ctx->restore.console_evtchn, ctx->x86_pv.width);
+              ctx->restore.console_evtchn, ctx->x86.pv.width);
 
     /* Set other information */
     SET_FIELD(guest_start_info, nr_pages,
-              ctx->x86_pv.max_pfn + 1, ctx->x86_pv.width);
+              ctx->x86.pv.max_pfn + 1, ctx->x86.pv.width);
     SET_FIELD(guest_start_info, shared_info,
-              ctx->dominfo.shared_info_frame << PAGE_SHIFT, ctx->x86_pv.width);
-    SET_FIELD(guest_start_info, flags, 0, ctx->x86_pv.width);
+              ctx->dominfo.shared_info_frame << PAGE_SHIFT, ctx->x86.pv.width);
+    SET_FIELD(guest_start_info, flags, 0, ctx->x86.pv.width);
 
     rc = 0;
 
@@ -236,7 +236,7 @@ static int process_vcpu_basic(struct xc_sr_context *ctx,
                               unsigned int vcpuid)
 {
     xc_interface *xch = ctx->xch;
-    vcpu_guest_context_any_t *vcpu = ctx->x86_pv.restore.vcpus[vcpuid].basic.ptr;
+    vcpu_guest_context_any_t *vcpu = ctx->x86.pv.restore.vcpus[vcpuid].basic.ptr;
     xen_pfn_t pfn, mfn;
     unsigned int i, gdt_count;
     int rc = -1;
@@ -251,10 +251,10 @@ static int process_vcpu_basic(struct xc_sr_context *ctx,
     }
 
     SET_FIELD(vcpu, flags,
-              GET_FIELD(vcpu, flags, ctx->x86_pv.width) | VGCF_online,
-              ctx->x86_pv.width);
+              GET_FIELD(vcpu, flags, ctx->x86.pv.width) | VGCF_online,
+              ctx->x86.pv.width);
 
-    gdt_count = GET_FIELD(vcpu, gdt_ents, ctx->x86_pv.width);
+    gdt_count = GET_FIELD(vcpu, gdt_ents, ctx->x86.pv.width);
     if ( gdt_count > FIRST_RESERVED_GDT_ENTRY )
     {
         ERROR("GDT entry count (%u) out of range (max %u)",
@@ -267,17 +267,17 @@ static int process_vcpu_basic(struct xc_sr_context *ctx,
     /* Convert GDT frames to mfns. */
     for ( i = 0; i < gdt_count; ++i )
     {
-        pfn = GET_FIELD(vcpu, gdt_frames[i], ctx->x86_pv.width);
-        if ( pfn > ctx->x86_pv.max_pfn )
+        pfn = GET_FIELD(vcpu, gdt_frames[i], ctx->x86.pv.width);
+        if ( pfn > ctx->x86.pv.max_pfn )
         {
             ERROR("GDT frame %u (pfn %#lx) out of range", i, pfn);
             goto err;
         }
 
-        if ( (ctx->x86_pv.restore.pfn_types[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
+        if ( (ctx->x86.pv.restore.pfn_types[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
         {
             ERROR("GDT frame %u (pfn %#lx) has bad type %u", i, pfn,
-                  (ctx->x86_pv.restore.pfn_types[pfn] >>
+                  (ctx->x86.pv.restore.pfn_types[pfn] >>
                    XEN_DOMCTL_PFINFO_LTAB_SHIFT));
             goto err;
         }
@@ -290,25 +290,25 @@ static int process_vcpu_basic(struct xc_sr_context *ctx,
             goto err;
         }
 
-        SET_FIELD(vcpu, gdt_frames[i], mfn, ctx->x86_pv.width);
+        SET_FIELD(vcpu, gdt_frames[i], mfn, ctx->x86.pv.width);
     }
 
     /* Convert CR3 to an mfn. */
-    pfn = cr3_to_mfn(ctx, GET_FIELD(vcpu, ctrlreg[3], ctx->x86_pv.width));
-    if ( pfn > ctx->x86_pv.max_pfn )
+    pfn = cr3_to_mfn(ctx, GET_FIELD(vcpu, ctrlreg[3], ctx->x86.pv.width));
+    if ( pfn > ctx->x86.pv.max_pfn )
     {
         ERROR("cr3 (pfn %#lx) out of range", pfn);
         goto err;
     }
 
-    if ( (ctx->x86_pv.restore.pfn_types[pfn] &
+    if ( (ctx->x86.pv.restore.pfn_types[pfn] &
           XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
-         (((xen_pfn_t)ctx->x86_pv.levels) << XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
+         (((xen_pfn_t)ctx->x86.pv.levels) << XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
     {
         ERROR("cr3 (pfn %#lx) has bad type %u, expected %u", pfn,
-              (ctx->x86_pv.restore.pfn_types[pfn] >>
+              (ctx->x86.pv.restore.pfn_types[pfn] >>
                XEN_DOMCTL_PFINFO_LTAB_SHIFT),
-              ctx->x86_pv.levels);
+              ctx->x86.pv.levels);
         goto err;
     }
 
@@ -320,27 +320,27 @@ static int process_vcpu_basic(struct xc_sr_context *ctx,
         goto err;
     }
 
-    SET_FIELD(vcpu, ctrlreg[3], mfn_to_cr3(ctx, mfn), ctx->x86_pv.width);
+    SET_FIELD(vcpu, ctrlreg[3], mfn_to_cr3(ctx, mfn), ctx->x86.pv.width);
 
     /* 64bit guests: Convert CR1 (guest pagetables) to mfn. */
-    if ( ctx->x86_pv.levels == 4 && (vcpu->x64.ctrlreg[1] & 1) )
+    if ( ctx->x86.pv.levels == 4 && (vcpu->x64.ctrlreg[1] & 1) )
     {
         pfn = vcpu->x64.ctrlreg[1] >> PAGE_SHIFT;
 
-        if ( pfn > ctx->x86_pv.max_pfn )
+        if ( pfn > ctx->x86.pv.max_pfn )
         {
             ERROR("cr1 (pfn %#lx) out of range", pfn);
             goto err;
         }
 
-        if ( (ctx->x86_pv.restore.pfn_types[pfn] &
+        if ( (ctx->x86.pv.restore.pfn_types[pfn] &
               XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
-             (((xen_pfn_t)ctx->x86_pv.levels) << XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
+             (((xen_pfn_t)ctx->x86.pv.levels) << XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
         {
             ERROR("cr1 (pfn %#lx) has bad type %u, expected %u", pfn,
-                  (ctx->x86_pv.restore.pfn_types[pfn] >>
+                  (ctx->x86.pv.restore.pfn_types[pfn] >>
                    XEN_DOMCTL_PFINFO_LTAB_SHIFT),
-                  ctx->x86_pv.levels);
+                  ctx->x86.pv.levels);
             goto err;
         }
 
@@ -375,7 +375,7 @@ static int process_vcpu_extended(struct xc_sr_context *ctx,
 {
     xc_interface *xch = ctx->xch;
     struct xc_sr_x86_pv_restore_vcpu *vcpu =
-        &ctx->x86_pv.restore.vcpus[vcpuid];
+        &ctx->x86.pv.restore.vcpus[vcpuid];
     DECLARE_DOMCTL;
 
     domctl.cmd = XEN_DOMCTL_set_ext_vcpucontext;
@@ -399,7 +399,7 @@ static int process_vcpu_xsave(struct xc_sr_context *ctx,
 {
     xc_interface *xch = ctx->xch;
     struct xc_sr_x86_pv_restore_vcpu *vcpu =
-        &ctx->x86_pv.restore.vcpus[vcpuid];
+        &ctx->x86.pv.restore.vcpus[vcpuid];
     int rc;
     DECLARE_DOMCTL;
     DECLARE_HYPERCALL_BUFFER(void, buffer);
@@ -437,7 +437,7 @@ static int process_vcpu_msrs(struct xc_sr_context *ctx,
 {
     xc_interface *xch = ctx->xch;
     struct xc_sr_x86_pv_restore_vcpu *vcpu =
-        &ctx->x86_pv.restore.vcpus[vcpuid];
+        &ctx->x86.pv.restore.vcpus[vcpuid];
     int rc;
     DECLARE_DOMCTL;
     DECLARE_HYPERCALL_BUFFER(void, buffer);
@@ -477,9 +477,9 @@ static int update_vcpu_context(struct xc_sr_context *ctx)
     unsigned int i;
     int rc = 0;
 
-    for ( i = 0; i < ctx->x86_pv.restore.nr_vcpus; ++i )
+    for ( i = 0; i < ctx->x86.pv.restore.nr_vcpus; ++i )
     {
-        vcpu = &ctx->x86_pv.restore.vcpus[i];
+        vcpu = &ctx->x86.pv.restore.vcpus[i];
 
         if ( vcpu->basic.ptr )
         {
@@ -530,21 +530,21 @@ static int update_guest_p2m(struct xc_sr_context *ctx)
     unsigned int i;
     int rc = -1;
 
-    for ( i = 0; i < ctx->x86_pv.p2m_frames; ++i )
+    for ( i = 0; i < ctx->x86.pv.p2m_frames; ++i )
     {
-        pfn = ctx->x86_pv.p2m_pfns[i];
+        pfn = ctx->x86.pv.p2m_pfns[i];
 
-        if ( pfn > ctx->x86_pv.max_pfn )
+        if ( pfn > ctx->x86.pv.max_pfn )
         {
             ERROR("pfn (%#lx) for p2m_frame_list[%u] out of range",
                   pfn, i);
             goto err;
         }
 
-        if ( (ctx->x86_pv.restore.pfn_types[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
+        if ( (ctx->x86.pv.restore.pfn_types[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
         {
             ERROR("pfn (%#lx) for p2m_frame_list[%u] has bad type %u", pfn, i,
-                  (ctx->x86_pv.restore.pfn_types[pfn] >>
+                  (ctx->x86.pv.restore.pfn_types[pfn] >>
                    XEN_DOMCTL_PFINFO_LTAB_SHIFT));
             goto err;
         }
@@ -557,25 +557,25 @@ static int update_guest_p2m(struct xc_sr_context *ctx)
             goto err;
         }
 
-        ctx->x86_pv.p2m_pfns[i] = mfn;
+        ctx->x86.pv.p2m_pfns[i] = mfn;
     }
 
     guest_p2m = xc_map_foreign_pages(xch, ctx->domid, PROT_WRITE,
-                                     ctx->x86_pv.p2m_pfns,
-                                     ctx->x86_pv.p2m_frames);
+                                     ctx->x86.pv.p2m_pfns,
+                                     ctx->x86.pv.p2m_frames);
     if ( !guest_p2m )
     {
         PERROR("Failed to map p2m frames");
         goto err;
     }
 
-    memcpy(guest_p2m, ctx->x86_pv.p2m,
-           (ctx->x86_pv.max_pfn + 1) * ctx->x86_pv.width);
+    memcpy(guest_p2m, ctx->x86.pv.p2m,
+           (ctx->x86.pv.max_pfn + 1) * ctx->x86.pv.width);
     rc = 0;
 
  err:
     if ( guest_p2m )
-        munmap(guest_p2m, ctx->x86_pv.p2m_frames * PAGE_SIZE);
+        munmap(guest_p2m, ctx->x86.pv.p2m_frames * PAGE_SIZE);
 
     return rc;
 }
@@ -604,7 +604,7 @@ static int handle_x86_pv_info(struct xc_sr_context *ctx,
     xc_interface *xch = ctx->xch;
     struct xc_sr_rec_x86_pv_info *info = rec->data;
 
-    if ( ctx->x86_pv.restore.seen_pv_info )
+    if ( ctx->x86.pv.restore.seen_pv_info )
     {
         ERROR("Already received X86_PV_INFO record");
         return -1;
@@ -628,7 +628,7 @@ static int handle_x86_pv_info(struct xc_sr_context *ctx,
      * PV domains default to native width.  For an incomming compat domain, we
      * will typically be the first entity to inform Xen.
      */
-    if ( info->guest_width != ctx->x86_pv.width )
+    if ( info->guest_width != ctx->x86.pv.width )
     {
         struct xen_domctl domctl = {
             .domain = ctx->domid,
@@ -654,16 +654,16 @@ static int handle_x86_pv_info(struct xc_sr_context *ctx,
     }
 
     /* Sanity check (possibly new) domain settings. */
-    if ( (info->guest_width != ctx->x86_pv.width) ||
-         (info->pt_levels   != ctx->x86_pv.levels) )
+    if ( (info->guest_width != ctx->x86.pv.width) ||
+         (info->pt_levels   != ctx->x86.pv.levels) )
     {
        ERROR("X86_PV_INFO width/pt_levels settings %u/%u mismatch with d%d %u/%u",
               info->guest_width, info->pt_levels, ctx->domid,
-              ctx->x86_pv.width, ctx->x86_pv.levels);
+              ctx->x86.pv.width, ctx->x86.pv.levels);
         return -1;
     }
 
-    ctx->x86_pv.restore.seen_pv_info = true;
+    ctx->x86.pv.restore.seen_pv_info = true;
     return 0;
 }
 
@@ -676,10 +676,10 @@ static int handle_x86_pv_p2m_frames(struct xc_sr_context *ctx,
 {
     xc_interface *xch = ctx->xch;
     struct xc_sr_rec_x86_pv_p2m_frames *data = rec->data;
-    unsigned int start, end, x, fpp = PAGE_SIZE / ctx->x86_pv.width;
+    unsigned int start, end, x, fpp = PAGE_SIZE / ctx->x86.pv.width;
     int rc;
 
-    if ( !ctx->x86_pv.restore.seen_pv_info )
+    if ( !ctx->x86.pv.restore.seen_pv_info )
     {
         ERROR("Not yet received X86_PV_INFO record");
         return -1;
@@ -711,7 +711,7 @@ static int handle_x86_pv_p2m_frames(struct xc_sr_context *ctx,
         return -1;
     }
 
-    if ( data->end_pfn > ctx->x86_pv.max_pfn )
+    if ( data->end_pfn > ctx->x86.pv.max_pfn )
     {
         rc = expand_p2m(ctx, data->end_pfn);
         if ( rc )
@@ -719,7 +719,7 @@ static int handle_x86_pv_p2m_frames(struct xc_sr_context 
*ctx,
     }
 
     for ( x = 0; x < (end - start); ++x )
-        ctx->x86_pv.p2m_pfns[start + x] = data->p2m_pfns[x];
+        ctx->x86.pv.p2m_pfns[start + x] = data->p2m_pfns[x];
 
     return 0;
 }
@@ -788,21 +788,21 @@ static int handle_x86_pv_vcpu_blob(struct xc_sr_context 
*ctx,
     }
 
     /* Check that the vcpu id is within range. */
-    if ( vhdr->vcpu_id >= ctx->x86_pv.restore.nr_vcpus )
+    if ( vhdr->vcpu_id >= ctx->x86.pv.restore.nr_vcpus )
     {
         ERROR("%s record vcpu_id (%u) exceeds domain max (%u)",
-              rec_name, vhdr->vcpu_id, ctx->x86_pv.restore.nr_vcpus - 1);
+              rec_name, vhdr->vcpu_id, ctx->x86.pv.restore.nr_vcpus - 1);
         goto out;
     }
 
-    vcpu = &ctx->x86_pv.restore.vcpus[vhdr->vcpu_id];
+    vcpu = &ctx->x86.pv.restore.vcpus[vhdr->vcpu_id];
 
     /* Further per-record checks, where possible. */
     switch ( rec->type )
     {
     case REC_TYPE_X86_PV_VCPU_BASIC:
     {
-        size_t vcpusz = ctx->x86_pv.width == 8 ?
+        size_t vcpusz = ctx->x86.pv.width == 8 ?
             sizeof(vcpu_guest_context_x86_64_t) :
             sizeof(vcpu_guest_context_x86_32_t);
 
@@ -868,7 +868,7 @@ static int handle_shared_info(struct xc_sr_context *ctx,
     shared_info_any_t *guest_shinfo = NULL;
     const shared_info_any_t *old_shinfo = rec->data;
 
-    if ( !ctx->x86_pv.restore.seen_pv_info )
+    if ( !ctx->x86.pv.restore.seen_pv_info )
     {
         ERROR("Not yet received X86_PV_INFO record");
         return -1;
@@ -891,18 +891,18 @@ static int handle_shared_info(struct xc_sr_context *ctx,
         goto err;
     }
 
-    MEMCPY_FIELD(guest_shinfo, old_shinfo, vcpu_info, ctx->x86_pv.width);
-    MEMCPY_FIELD(guest_shinfo, old_shinfo, arch, ctx->x86_pv.width);
+    MEMCPY_FIELD(guest_shinfo, old_shinfo, vcpu_info, ctx->x86.pv.width);
+    MEMCPY_FIELD(guest_shinfo, old_shinfo, arch, ctx->x86.pv.width);
 
     SET_FIELD(guest_shinfo, arch.pfn_to_mfn_frame_list_list,
-              0, ctx->x86_pv.width);
+              0, ctx->x86.pv.width);
 
-    MEMSET_ARRAY_FIELD(guest_shinfo, evtchn_pending, 0, ctx->x86_pv.width);
+    MEMSET_ARRAY_FIELD(guest_shinfo, evtchn_pending, 0, ctx->x86.pv.width);
     for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
         SET_FIELD(guest_shinfo, vcpu_info[i].evtchn_pending_sel,
-                  0, ctx->x86_pv.width);
+                  0, ctx->x86.pv.width);
 
-    MEMSET_ARRAY_FIELD(guest_shinfo, evtchn_mask, 0xff, ctx->x86_pv.width);
+    MEMSET_ARRAY_FIELD(guest_shinfo, evtchn_mask, 0xff, ctx->x86.pv.width);
 
     rc = 0;
 
@@ -916,30 +916,30 @@ static int handle_shared_info(struct xc_sr_context *ctx,
 /* restore_ops function. */
 static bool x86_pv_pfn_is_valid(const struct xc_sr_context *ctx, xen_pfn_t pfn)
 {
-    return pfn <= ctx->x86_pv.max_pfn;
+    return pfn <= ctx->x86.pv.max_pfn;
 }
 
 /* restore_ops function. */
 static void x86_pv_set_page_type(struct xc_sr_context *ctx, xen_pfn_t pfn,
                                  unsigned long type)
 {
-    assert(pfn <= ctx->x86_pv.max_pfn);
+    assert(pfn <= ctx->x86.pv.max_pfn);
 
-    ctx->x86_pv.restore.pfn_types[pfn] = type;
+    ctx->x86.pv.restore.pfn_types[pfn] = type;
 }
 
 /* restore_ops function. */
 static void x86_pv_set_gfn(struct xc_sr_context *ctx, xen_pfn_t pfn,
                            xen_pfn_t mfn)
 {
-    assert(pfn <= ctx->x86_pv.max_pfn);
+    assert(pfn <= ctx->x86.pv.max_pfn);
 
-    if ( ctx->x86_pv.width == sizeof(uint64_t) )
+    if ( ctx->x86.pv.width == sizeof(uint64_t) )
         /* 64 bit guest.  Need to expand INVALID_MFN for 32 bit toolstacks. */
-        ((uint64_t *)ctx->x86_pv.p2m)[pfn] = mfn == INVALID_MFN ? ~0ULL : mfn;
+        ((uint64_t *)ctx->x86.pv.p2m)[pfn] = mfn == INVALID_MFN ? ~0ULL : mfn;
     else
         /* 32 bit guest.  Can truncate INVALID_MFN for 64 bit toolstacks. */
-        ((uint32_t *)ctx->x86_pv.p2m)[pfn] = mfn;
+        ((uint32_t *)ctx->x86.pv.p2m)[pfn] = mfn;
 }
 
 /*
@@ -1043,10 +1043,10 @@ static int x86_pv_setup(struct xc_sr_context *ctx)
     if ( rc )
         return rc;
 
-    ctx->x86_pv.restore.nr_vcpus = ctx->dominfo.max_vcpu_id + 1;
-    ctx->x86_pv.restore.vcpus = calloc(sizeof(struct 
xc_sr_x86_pv_restore_vcpu),
-                                       ctx->x86_pv.restore.nr_vcpus);
-    if ( !ctx->x86_pv.restore.vcpus )
+    ctx->x86.pv.restore.nr_vcpus = ctx->dominfo.max_vcpu_id + 1;
+    ctx->x86.pv.restore.vcpus = calloc(sizeof(struct 
xc_sr_x86_pv_restore_vcpu),
+                                       ctx->x86.pv.restore.nr_vcpus);
+    if ( !ctx->x86.pv.restore.vcpus )
     {
         errno = ENOMEM;
         return -1;
@@ -1130,17 +1130,17 @@ static int x86_pv_stream_complete(struct xc_sr_context 
*ctx)
  */
 static int x86_pv_cleanup(struct xc_sr_context *ctx)
 {
-    free(ctx->x86_pv.p2m);
-    free(ctx->x86_pv.p2m_pfns);
+    free(ctx->x86.pv.p2m);
+    free(ctx->x86.pv.p2m_pfns);
 
-    if ( ctx->x86_pv.restore.vcpus )
+    if ( ctx->x86.pv.restore.vcpus )
     {
         unsigned int i;
 
-        for ( i = 0; i < ctx->x86_pv.restore.nr_vcpus; ++i )
+        for ( i = 0; i < ctx->x86.pv.restore.nr_vcpus; ++i )
         {
             struct xc_sr_x86_pv_restore_vcpu *vcpu =
-                &ctx->x86_pv.restore.vcpus[i];
+                &ctx->x86.pv.restore.vcpus[i];
 
             free(vcpu->basic.ptr);
             free(vcpu->extd.ptr);
@@ -1148,13 +1148,13 @@ static int x86_pv_cleanup(struct xc_sr_context *ctx)
             free(vcpu->msr.ptr);
         }
 
-        free(ctx->x86_pv.restore.vcpus);
+        free(ctx->x86.pv.restore.vcpus);
     }
 
-    free(ctx->x86_pv.restore.pfn_types);
+    free(ctx->x86.pv.restore.pfn_types);
 
-    if ( ctx->x86_pv.m2p )
-        munmap(ctx->x86_pv.m2p, ctx->x86_pv.nr_m2p_frames * PAGE_SIZE);
+    if ( ctx->x86.pv.m2p )
+        munmap(ctx->x86.pv.m2p, ctx->x86.pv.nr_m2p_frames * PAGE_SIZE);
 
     return 0;
 }
diff --git a/tools/libxc/xc_sr_save_x86_hvm.c b/tools/libxc/xc_sr_save_x86_hvm.c
index d925a81999..58722118ae 100644
--- a/tools/libxc/xc_sr_save_x86_hvm.c
+++ b/tools/libxc/xc_sr_save_x86_hvm.c
@@ -165,7 +165,7 @@ static int x86_hvm_setup(struct xc_sr_context *ctx)
         return -1;
     }
 
-    ctx->x86_hvm.save.qemu_enabled_logdirty = true;
+    ctx->x86.hvm.save.qemu_enabled_logdirty = true;
 
     return 0;
 }
@@ -197,7 +197,7 @@ static int x86_hvm_cleanup(struct xc_sr_context *ctx)
     xc_interface *xch = ctx->xch;
 
     /* If qemu successfully enabled logdirty mode, attempt to disable. */
-    if ( ctx->x86_hvm.save.qemu_enabled_logdirty &&
+    if ( ctx->x86.hvm.save.qemu_enabled_logdirty &&
          ctx->save.callbacks->switch_qemu_logdirty(
              ctx->domid, 0, ctx->save.callbacks->data) )
     {
diff --git a/tools/libxc/xc_sr_save_x86_pv.c b/tools/libxc/xc_sr_save_x86_pv.c
index 94d0f68911..c1c6892666 100644
--- a/tools/libxc/xc_sr_save_x86_pv.c
+++ b/tools/libxc/xc_sr_save_x86_pv.c
@@ -16,9 +16,9 @@ static int map_shinfo(struct xc_sr_context *ctx)
 {
     xc_interface *xch = ctx->xch;
 
-    ctx->x86_pv.shinfo = xc_map_foreign_range(
+    ctx->x86.pv.shinfo = xc_map_foreign_range(
         xch, ctx->domid, PAGE_SIZE, PROT_READ, ctx->dominfo.shared_info_frame);
-    if ( !ctx->x86_pv.shinfo )
+    if ( !ctx->x86.pv.shinfo )
     {
         PERROR("Failed to map shared info frame at mfn %#lx",
                ctx->dominfo.shared_info_frame);
@@ -37,7 +37,7 @@ static int copy_mfns_from_guest(const struct xc_sr_context 
*ctx,
 {
     size_t x;
 
-    if ( ctx->x86_pv.width == sizeof(unsigned long) )
+    if ( ctx->x86.pv.width == sizeof(unsigned long) )
         memcpy(dst, src, count * sizeof(*dst));
     else
     {
@@ -82,18 +82,18 @@ static int map_p2m_leaves(struct xc_sr_context *ctx, 
xen_pfn_t *mfns,
     xc_interface *xch = ctx->xch;
     unsigned int x;
 
-    ctx->x86_pv.p2m = xc_map_foreign_pages(xch, ctx->domid, PROT_READ,
+    ctx->x86.pv.p2m = xc_map_foreign_pages(xch, ctx->domid, PROT_READ,
                                            mfns, n_mfns);
-    if ( !ctx->x86_pv.p2m )
+    if ( !ctx->x86.pv.p2m )
     {
         PERROR("Failed to map p2m frames");
         return -1;
     }
 
-    ctx->save.p2m_size = ctx->x86_pv.max_pfn + 1;
-    ctx->x86_pv.p2m_frames = n_mfns;
-    ctx->x86_pv.p2m_pfns = malloc(n_mfns * sizeof(*mfns));
-    if ( !ctx->x86_pv.p2m_pfns )
+    ctx->save.p2m_size = ctx->x86.pv.max_pfn + 1;
+    ctx->x86.pv.p2m_frames = n_mfns;
+    ctx->x86.pv.p2m_pfns = malloc(n_mfns * sizeof(*mfns));
+    if ( !ctx->x86.pv.p2m_pfns )
     {
         ERROR("Cannot allocate %zu bytes for p2m pfns list",
               n_mfns * sizeof(*mfns));
@@ -111,7 +111,7 @@ static int map_p2m_leaves(struct xc_sr_context *ctx, 
xen_pfn_t *mfns,
             return -1;
         }
 
-        ctx->x86_pv.p2m_pfns[x] = mfn_to_pfn(ctx, mfns[x]);
+        ctx->x86.pv.p2m_pfns[x] = mfn_to_pfn(ctx, mfns[x]);
     }
 
     return 0;
@@ -144,17 +144,17 @@ static int map_p2m_tree(struct xc_sr_context *ctx)
     void *guest_fl = NULL;
     size_t local_fl_size;
 
-    fpp = PAGE_SIZE / ctx->x86_pv.width;
-    fll_entries = (ctx->x86_pv.max_pfn / (fpp * fpp)) + 1;
+    fpp = PAGE_SIZE / ctx->x86.pv.width;
+    fll_entries = (ctx->x86.pv.max_pfn / (fpp * fpp)) + 1;
     if ( fll_entries > fpp )
     {
-        ERROR("max_pfn %#lx too large for p2m tree", ctx->x86_pv.max_pfn);
+        ERROR("max_pfn %#lx too large for p2m tree", ctx->x86.pv.max_pfn);
         goto err;
     }
 
-    fll_mfn = GET_FIELD(ctx->x86_pv.shinfo, arch.pfn_to_mfn_frame_list_list,
-                        ctx->x86_pv.width);
-    if ( fll_mfn == 0 || fll_mfn > ctx->x86_pv.max_mfn )
+    fll_mfn = GET_FIELD(ctx->x86.pv.shinfo, arch.pfn_to_mfn_frame_list_list,
+                        ctx->x86.pv.width);
+    if ( fll_mfn == 0 || fll_mfn > ctx->x86.pv.max_mfn )
     {
         ERROR("Bad mfn %#lx for p2m frame list list", fll_mfn);
         goto err;
@@ -189,7 +189,7 @@ static int map_p2m_tree(struct xc_sr_context *ctx)
     saved_x = 0;
     for ( x = 0; x < fll_entries; ++x )
     {
-        if ( local_fll[x] == 0 || local_fll[x] > ctx->x86_pv.max_mfn )
+        if ( local_fll[x] == 0 || local_fll[x] > ctx->x86.pv.max_mfn )
         {
             ERROR("Bad mfn %#lx at index %u (of %u) in p2m frame list list",
                   local_fll[x], x, fll_entries);
@@ -213,15 +213,15 @@ static int map_p2m_tree(struct xc_sr_context *ctx)
      * needed for p2m and logdirty map.
      */
     max_pfn = (saved_x + 1) * fpp * fpp - 1;
-    if ( max_pfn < ctx->x86_pv.max_pfn )
+    if ( max_pfn < ctx->x86.pv.max_pfn )
     {
-        ctx->x86_pv.max_pfn = max_pfn;
-        fll_entries = (ctx->x86_pv.max_pfn / (fpp * fpp)) + 1;
+        ctx->x86.pv.max_pfn = max_pfn;
+        fll_entries = (ctx->x86.pv.max_pfn / (fpp * fpp)) + 1;
     }
-    ctx->x86_pv.p2m_frames = (ctx->x86_pv.max_pfn + fpp) / fpp;
-    DPRINTF("max_pfn %#lx, p2m_frames %d", ctx->x86_pv.max_pfn,
-            ctx->x86_pv.p2m_frames);
-    fl_entries  = (ctx->x86_pv.max_pfn / fpp) + 1;
+    ctx->x86.pv.p2m_frames = (ctx->x86.pv.max_pfn + fpp) / fpp;
+    DPRINTF("max_pfn %#lx, p2m_frames %d", ctx->x86.pv.max_pfn,
+            ctx->x86.pv.p2m_frames);
+    fl_entries  = (ctx->x86.pv.max_pfn / fpp) + 1;
 
     /* Map the guest mid p2m frames. */
     guest_fl = xc_map_foreign_pages(xch, ctx->domid, PROT_READ,
@@ -249,7 +249,7 @@ static int map_p2m_tree(struct xc_sr_context *ctx)
 
     for ( x = 0; x < fl_entries; ++x )
     {
-        if ( local_fl[x] == 0 || local_fl[x] > ctx->x86_pv.max_mfn )
+        if ( local_fl[x] == 0 || local_fl[x] > ctx->x86.pv.max_mfn )
         {
             ERROR("Bad mfn %#lx at index %u (of %u) in p2m frame list",
                   local_fl[x], x, fl_entries);
@@ -281,11 +281,11 @@ static int get_p2m_generation(struct xc_sr_context *ctx)
     uint64_t p2m_generation;
     int rc;
 
-    p2m_generation = GET_FIELD(ctx->x86_pv.shinfo, arch.p2m_generation,
-                               ctx->x86_pv.width);
+    p2m_generation = GET_FIELD(ctx->x86.pv.shinfo, arch.p2m_generation,
+                               ctx->x86.pv.width);
 
-    rc = (p2m_generation == ctx->x86_pv.p2m_generation) ? 0 : -1;
-    ctx->x86_pv.p2m_generation = p2m_generation;
+    rc = (p2m_generation == ctx->x86.pv.p2m_generation) ? 0 : -1;
+    ctx->x86.pv.p2m_generation = p2m_generation;
 
     return rc;
 }
@@ -322,7 +322,7 @@ static int map_p2m_list(struct xc_sr_context *ctx, uint64_t 
p2m_cr3)
 
     p2m_mfn = cr3_to_mfn(ctx, p2m_cr3);
     assert(p2m_mfn != 0);
-    if ( p2m_mfn > ctx->x86_pv.max_mfn )
+    if ( p2m_mfn > ctx->x86.pv.max_mfn )
     {
         ERROR("Bad p2m_cr3 value %#" PRIx64, p2m_cr3);
         errno = ERANGE;
@@ -331,13 +331,13 @@ static int map_p2m_list(struct xc_sr_context *ctx, 
uint64_t p2m_cr3)
 
     get_p2m_generation(ctx);
 
-    p2m_vaddr = GET_FIELD(ctx->x86_pv.shinfo, arch.p2m_vaddr,
-                          ctx->x86_pv.width);
-    fpp = PAGE_SIZE / ctx->x86_pv.width;
-    ctx->x86_pv.p2m_frames = ctx->x86_pv.max_pfn / fpp + 1;
-    p2m_end = p2m_vaddr + ctx->x86_pv.p2m_frames * PAGE_SIZE - 1;
+    p2m_vaddr = GET_FIELD(ctx->x86.pv.shinfo, arch.p2m_vaddr,
+                          ctx->x86.pv.width);
+    fpp = PAGE_SIZE / ctx->x86.pv.width;
+    ctx->x86.pv.p2m_frames = ctx->x86.pv.max_pfn / fpp + 1;
+    p2m_end = p2m_vaddr + ctx->x86.pv.p2m_frames * PAGE_SIZE - 1;
 
-    if ( ctx->x86_pv.width == 8 )
+    if ( ctx->x86.pv.width == 8 )
     {
         mask = 0x0000ffffffffffffULL;
         if ( !is_canonical_address(p2m_vaddr) ||
@@ -368,8 +368,8 @@ static int map_p2m_list(struct xc_sr_context *ctx, uint64_t 
p2m_cr3)
 
     DPRINTF("p2m list from %#" PRIx64 " to %#" PRIx64 ", root at %#lx",
             p2m_vaddr, p2m_end, p2m_mfn);
-    DPRINTF("max_pfn %#lx, p2m_frames %d", ctx->x86_pv.max_pfn,
-            ctx->x86_pv.p2m_frames);
+    DPRINTF("max_pfn %#lx, p2m_frames %d", ctx->x86.pv.max_pfn,
+            ctx->x86.pv.p2m_frames);
 
     mfns = malloc(sizeof(*mfns));
     if ( !mfns )
@@ -382,7 +382,7 @@ static int map_p2m_list(struct xc_sr_context *ctx, uint64_t 
p2m_cr3)
     saved_mfn = 0;
     idx_start = idx_end = saved_idx = 0;
 
-    for ( level = ctx->x86_pv.levels; level > 0; level-- )
+    for ( level = ctx->x86.pv.levels; level > 0; level-- )
     {
         n_pages = idx_end - idx_start + 1;
         ptes = xc_map_foreign_pages(xch, ctx->domid, PROT_READ, mfns, n_pages);
@@ -407,7 +407,7 @@ static int map_p2m_list(struct xc_sr_context *ctx, uint64_t 
p2m_cr3)
         for ( idx = idx_start; idx <= idx_end; idx++ )
         {
             mfn = pte_to_frame(ptes[idx]);
-            if ( mfn == 0 || mfn > ctx->x86_pv.max_mfn )
+            if ( mfn == 0 || mfn > ctx->x86.pv.max_mfn )
             {
                 ERROR("Bad mfn %#lx during page table walk for vaddr %#" 
PRIx64 " at level %d of p2m list",
                       mfn, off + ((xen_vaddr_t)idx << shift), level);
@@ -432,11 +432,11 @@ static int map_p2m_list(struct xc_sr_context *ctx, 
uint64_t p2m_cr3)
             if ( saved_idx == idx_end )
                 saved_idx++;
             max_pfn = ((xen_pfn_t)saved_idx << 9) * fpp - 1;
-            if ( max_pfn < ctx->x86_pv.max_pfn )
+            if ( max_pfn < ctx->x86.pv.max_pfn )
             {
-                ctx->x86_pv.max_pfn = max_pfn;
-                ctx->x86_pv.p2m_frames = (ctx->x86_pv.max_pfn + fpp) / fpp;
-                p2m_end = p2m_vaddr + ctx->x86_pv.p2m_frames * PAGE_SIZE - 1;
+                ctx->x86.pv.max_pfn = max_pfn;
+                ctx->x86.pv.p2m_frames = (ctx->x86.pv.max_pfn + fpp) / fpp;
+                p2m_end = p2m_vaddr + ctx->x86.pv.p2m_frames * PAGE_SIZE - 1;
                 idx_end = idx_start + saved_idx;
             }
         }
@@ -466,10 +466,10 @@ static int map_p2m(struct xc_sr_context *ctx)
 {
     uint64_t p2m_cr3;
 
-    ctx->x86_pv.p2m_generation = ~0ULL;
-    ctx->x86_pv.max_pfn = GET_FIELD(ctx->x86_pv.shinfo, arch.max_pfn,
-                                    ctx->x86_pv.width) - 1;
-    p2m_cr3 = GET_FIELD(ctx->x86_pv.shinfo, arch.p2m_cr3, ctx->x86_pv.width);
+    ctx->x86.pv.p2m_generation = ~0ULL;
+    ctx->x86.pv.max_pfn = GET_FIELD(ctx->x86.pv.shinfo, arch.max_pfn,
+                                    ctx->x86.pv.width) - 1;
+    p2m_cr3 = GET_FIELD(ctx->x86.pv.shinfo, arch.p2m_cr3, ctx->x86.pv.width);
 
     return p2m_cr3 ? map_p2m_list(ctx, p2m_cr3) : map_p2m_tree(ctx);
 }
@@ -503,7 +503,7 @@ static int write_one_vcpu_basic(struct xc_sr_context *ctx, 
uint32_t id)
     /* Vcpu0 is special: Convert the suspend record to a pfn. */
     if ( id == 0 )
     {
-        mfn = GET_FIELD(&vcpu, user_regs.edx, ctx->x86_pv.width);
+        mfn = GET_FIELD(&vcpu, user_regs.edx, ctx->x86.pv.width);
         if ( !mfn_in_pseudophysmap(ctx, mfn) )
         {
             ERROR("Bad mfn for suspend record");
@@ -512,10 +512,10 @@ static int write_one_vcpu_basic(struct xc_sr_context 
*ctx, uint32_t id)
             goto err;
         }
         SET_FIELD(&vcpu, user_regs.edx, mfn_to_pfn(ctx, mfn),
-                  ctx->x86_pv.width);
+                  ctx->x86.pv.width);
     }
 
-    gdt_count = GET_FIELD(&vcpu, gdt_ents, ctx->x86_pv.width);
+    gdt_count = GET_FIELD(&vcpu, gdt_ents, ctx->x86.pv.width);
     if ( gdt_count > FIRST_RESERVED_GDT_ENTRY )
     {
         ERROR("GDT entry count (%u) out of range (max %u)",
@@ -528,7 +528,7 @@ static int write_one_vcpu_basic(struct xc_sr_context *ctx, 
uint32_t id)
     /* Convert GDT frames to pfns. */
     for ( i = 0; i < gdt_count; ++i )
     {
-        mfn = GET_FIELD(&vcpu, gdt_frames[i], ctx->x86_pv.width);
+        mfn = GET_FIELD(&vcpu, gdt_frames[i], ctx->x86.pv.width);
         if ( !mfn_in_pseudophysmap(ctx, mfn) )
         {
             ERROR("Bad mfn for frame %u of vcpu%u's GDT", i, id);
@@ -537,11 +537,11 @@ static int write_one_vcpu_basic(struct xc_sr_context 
*ctx, uint32_t id)
             goto err;
         }
         SET_FIELD(&vcpu, gdt_frames[i], mfn_to_pfn(ctx, mfn),
-                  ctx->x86_pv.width);
+                  ctx->x86.pv.width);
     }
 
     /* Convert CR3 to a pfn. */
-    mfn = cr3_to_mfn(ctx, GET_FIELD(&vcpu, ctrlreg[3], ctx->x86_pv.width));
+    mfn = cr3_to_mfn(ctx, GET_FIELD(&vcpu, ctrlreg[3], ctx->x86.pv.width));
     if ( !mfn_in_pseudophysmap(ctx, mfn) )
     {
         ERROR("Bad mfn for vcpu%u's cr3", id);
@@ -550,10 +550,10 @@ static int write_one_vcpu_basic(struct xc_sr_context 
*ctx, uint32_t id)
         goto err;
     }
     pfn = mfn_to_pfn(ctx, mfn);
-    SET_FIELD(&vcpu, ctrlreg[3], mfn_to_cr3(ctx, pfn), ctx->x86_pv.width);
+    SET_FIELD(&vcpu, ctrlreg[3], mfn_to_cr3(ctx, pfn), ctx->x86.pv.width);
 
     /* 64bit guests: Convert CR1 (guest pagetables) to pfn. */
-    if ( ctx->x86_pv.levels == 4 && vcpu.x64.ctrlreg[1] )
+    if ( ctx->x86.pv.levels == 4 && vcpu.x64.ctrlreg[1] )
     {
         mfn = vcpu.x64.ctrlreg[1] >> PAGE_SHIFT;
         if ( !mfn_in_pseudophysmap(ctx, mfn) )
@@ -567,7 +567,7 @@ static int write_one_vcpu_basic(struct xc_sr_context *ctx, 
uint32_t id)
         vcpu.x64.ctrlreg[1] = 1 | ((uint64_t)pfn << PAGE_SHIFT);
     }
 
-    if ( ctx->x86_pv.width == 8 )
+    if ( ctx->x86.pv.width == 8 )
         rc = write_split_record(ctx, &rec, &vcpu, sizeof(vcpu.x64));
     else
         rc = write_split_record(ctx, &rec, &vcpu, sizeof(vcpu.x32));
@@ -785,8 +785,8 @@ static int write_all_vcpu_information(struct xc_sr_context 
*ctx)
 static int write_x86_pv_info(struct xc_sr_context *ctx)
 {
     struct xc_sr_rec_x86_pv_info info = {
-        .guest_width = ctx->x86_pv.width,
-        .pt_levels = ctx->x86_pv.levels,
+        .guest_width = ctx->x86.pv.width,
+        .pt_levels = ctx->x86.pv.levels,
     };
     struct xc_sr_record rec = {
         .type = REC_TYPE_X86_PV_INFO,
@@ -805,10 +805,10 @@ static int write_x86_pv_p2m_frames(struct xc_sr_context 
*ctx)
 {
     xc_interface *xch = ctx->xch;
     int rc; unsigned int i;
-    size_t datasz = ctx->x86_pv.p2m_frames * sizeof(uint64_t);
+    size_t datasz = ctx->x86.pv.p2m_frames * sizeof(uint64_t);
     uint64_t *data = NULL;
     struct xc_sr_rec_x86_pv_p2m_frames hdr = {
-        .end_pfn = ctx->x86_pv.max_pfn,
+        .end_pfn = ctx->x86.pv.max_pfn,
     };
     struct xc_sr_record rec = {
         .type = REC_TYPE_X86_PV_P2M_FRAMES,
@@ -817,7 +817,7 @@ static int write_x86_pv_p2m_frames(struct xc_sr_context 
*ctx)
     };
 
     /* No need to translate if sizeof(uint64_t) == sizeof(xen_pfn_t). */
-    if ( sizeof(uint64_t) != sizeof(*ctx->x86_pv.p2m_pfns) )
+    if ( sizeof(uint64_t) != sizeof(*ctx->x86.pv.p2m_pfns) )
     {
         if ( !(data = malloc(datasz)) )
         {
@@ -826,15 +826,15 @@ static int write_x86_pv_p2m_frames(struct xc_sr_context 
*ctx)
             return -1;
         }
 
-        for ( i = 0; i < ctx->x86_pv.p2m_frames; ++i )
-            data[i] = ctx->x86_pv.p2m_pfns[i];
+        for ( i = 0; i < ctx->x86.pv.p2m_frames; ++i )
+            data[i] = ctx->x86.pv.p2m_pfns[i];
     }
     else
-        data = (uint64_t *)ctx->x86_pv.p2m_pfns;
+        data = (uint64_t *)ctx->x86.pv.p2m_pfns;
 
     rc = write_split_record(ctx, &rec, data, datasz);
 
-    if ( data != (uint64_t *)ctx->x86_pv.p2m_pfns )
+    if ( data != (uint64_t *)ctx->x86.pv.p2m_pfns )
         free(data);
 
     return rc;
@@ -848,7 +848,7 @@ static int write_shared_info(struct xc_sr_context *ctx)
     struct xc_sr_record rec = {
         .type = REC_TYPE_SHARED_INFO,
         .length = PAGE_SIZE,
-        .data = ctx->x86_pv.shinfo,
+        .data = ctx->x86.pv.shinfo,
     };
 
     return write_record(ctx, &rec);
@@ -867,7 +867,7 @@ static int normalise_pagetable(struct xc_sr_context *ctx, 
const uint64_t *src,
 
     type &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
 
-    if ( ctx->x86_pv.levels == 4 )
+    if ( ctx->x86.pv.levels == 4 )
     {
         /* 64bit guests only have Xen mappings in their L4 tables. */
         if ( type == XEN_DOMCTL_PFINFO_L4TAB )
@@ -899,7 +899,7 @@ static int normalise_pagetable(struct xc_sr_context *ctx, 
const uint64_t *src,
              * are normal but only a few will have Xen mappings.
              */
             i = (HYPERVISOR_VIRT_START_X86_32 >> L2_PAGETABLE_SHIFT_PAE) & 511;
-            if ( pte_to_frame(src[i]) == ctx->x86_pv.compat_m2p_mfn0 )
+            if ( pte_to_frame(src[i]) == ctx->x86.pv.compat_m2p_mfn0 )
             {
                 xen_first = i;
                 xen_last = (HYPERVISOR_VIRT_END_X86_32 >>
@@ -980,9 +980,9 @@ static int normalise_pagetable(struct xc_sr_context *ctx, 
const uint64_t *src,
 static xen_pfn_t x86_pv_pfn_to_gfn(const struct xc_sr_context *ctx,
                                    xen_pfn_t pfn)
 {
-    assert(pfn <= ctx->x86_pv.max_pfn);
+    assert(pfn <= ctx->x86.pv.max_pfn);
 
-    return xc_pfn_to_mfn(pfn, ctx->x86_pv.p2m, ctx->x86_pv.width);
+    return xc_pfn_to_mfn(pfn, ctx->x86.pv.p2m, ctx->x86.pv.width);
 }
 
 
@@ -1063,7 +1063,7 @@ static int x86_pv_end_of_checkpoint(struct xc_sr_context 
*ctx)
 
 static int x86_pv_check_vm_state(struct xc_sr_context *ctx)
 {
-    if ( ctx->x86_pv.p2m_generation == ~0ULL )
+    if ( ctx->x86.pv.p2m_generation == ~0ULL )
         return 0;
 
     return x86_pv_check_vm_state_p2m_list(ctx);
@@ -1071,16 +1071,16 @@ static int x86_pv_check_vm_state(struct xc_sr_context 
*ctx)
 
 static int x86_pv_cleanup(struct xc_sr_context *ctx)
 {
-    free(ctx->x86_pv.p2m_pfns);
+    free(ctx->x86.pv.p2m_pfns);
 
-    if ( ctx->x86_pv.p2m )
-        munmap(ctx->x86_pv.p2m, ctx->x86_pv.p2m_frames * PAGE_SIZE);
+    if ( ctx->x86.pv.p2m )
+        munmap(ctx->x86.pv.p2m, ctx->x86.pv.p2m_frames * PAGE_SIZE);
 
-    if ( ctx->x86_pv.shinfo )
-        munmap(ctx->x86_pv.shinfo, PAGE_SIZE);
+    if ( ctx->x86.pv.shinfo )
+        munmap(ctx->x86.pv.shinfo, PAGE_SIZE);
 
-    if ( ctx->x86_pv.m2p )
-        munmap(ctx->x86_pv.m2p, ctx->x86_pv.nr_m2p_frames * PAGE_SIZE);
+    if ( ctx->x86.pv.m2p )
+        munmap(ctx->x86.pv.m2p, ctx->x86.pv.nr_m2p_frames * PAGE_SIZE);
 
     return 0;
 }
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.