
[Xen-devel] [PATCH 6/7] move restore global variables into a global context



Move the restore code's file-scope global variables into a single
restore_ctx structure, reached through one global context pointer for now.
No functional change intended: each access is rewritten mechanically from
the bare variable to ctx->field.
---
 tools/libxc/xc_domain_restore.c |  277 +++++++++++++++++++--------------------
 1 files changed, 135 insertions(+), 142 deletions(-)
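
Editor's note (illustrative, not part of the patch): the change follows the
common pattern of gathering file-scope statics into one context structure
and routing every access through a pointer, presumably so a later patch can
drop the global instance and pass the context as a function argument. A
minimal sketch of the pattern; pfn_in_range() is an invented helper, not a
function in this file:

    /* Before: one implicit instance, hidden in a file-scope static. */
    static unsigned long p2m_size;

    /* After: the same state, gathered into an explicit context that is
     * still reached through a single global pointer for now. */
    struct restore_ctx {
        unsigned long p2m_size;
    };
    static struct restore_ctx _ctx, *ctx = &_ctx;

    static int pfn_in_range(unsigned long pfn)
    {
        /* Call sites change only from p2m_size to ctx->p2m_size. */
        return pfn < ctx->p2m_size;
    }

Keeping a single global instance keeps this patch mechanical; switching the
functions to take a struct restore_ctx * parameter later would make the
restore path re-entrant without touching the field accesses again.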

diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
index e3d2d4a..6430e91 100644
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -32,32 +32,25 @@
 #include <xen/hvm/ioreq.h>
 #include <xen/hvm/params.h>
 
-/* max mfn of the current host machine */
-static unsigned long max_mfn;
-
-/* virtual starting address of the hypervisor */
-static unsigned long hvirt_start;
-
-/* #levels of page tables used by the current guest */
-static unsigned int pt_levels;
-
-/* number of pfns this guest has (i.e. number of entries in the P2M) */
-static unsigned long p2m_size;
-
-/* number of 'in use' pfns in the guest (i.e. #P2M entries with a valid mfn) */
-static unsigned long nr_pfns;
-
-/* Live mapping of the table mapping each PFN to its current MFN. */
-static xen_pfn_t *live_p2m = NULL;
-
-/* A table mapping each PFN to its new MFN. */
-static xen_pfn_t *p2m = NULL;
-
-/* Address size of the guest, in bytes */
-unsigned int guest_width;
-
-/* If have enough continuous memory for super page allocation */
-static unsigned no_superpage_mem = 0;
+struct restore_ctx {
+    unsigned long max_mfn; /* max mfn of the current host machine */
+    unsigned long hvirt_start; /* virtual starting address of the hypervisor */
+    unsigned int pt_levels; /* #levels of page tables used by the current guest */
+    unsigned long p2m_size; /* number of pfns this guest has (i.e. number of entries in the P2M) */
+    unsigned long nr_pfns; /* number of 'in use' pfns in the guest (i.e. #P2M entries with a valid mfn) */
+    xen_pfn_t *live_p2m; /* Live mapping of the table mapping each PFN to its current MFN. */
+    xen_pfn_t *p2m; /* A table mapping each PFN to its new MFN. */
+    unsigned int guest_width; /* Address size of the guest, in bytes */
+    unsigned no_superpage_mem; /* set when we fail to find contiguous memory for a superpage allocation */
+};
+
+static struct restore_ctx _ctx = {
+    .live_p2m = NULL,
+    .p2m = NULL,
+    .no_superpage_mem = 0,
+};
+
+static struct restore_ctx *ctx = &_ctx;
 
 /*
 **
@@ -84,7 +77,7 @@ static int super_page_populated(unsigned long pfn)
     pfn &= ~(SUPERPAGE_NR_PFNS - 1);
     for ( i = pfn; i < pfn + SUPERPAGE_NR_PFNS; i++ )
     {
-        if ( p2m[i] != INVALID_P2M_ENTRY )
+        if ( ctx->p2m[i] != INVALID_P2M_ENTRY )
             return 1;
     }
     return 0;
@@ -109,7 +102,7 @@ static int break_super_page(int xc_handle,
     for ( i = start_pfn; i < start_pfn + SUPERPAGE_NR_PFNS; i++ )
     {
         /* check the 2M page are populated */
-        if ( p2m[i] == INVALID_P2M_ENTRY ) {
+        if ( ctx->p2m[i] == INVALID_P2M_ENTRY ) {
             DPRINTF("Previous super page was populated wrongly!\n");
             return 1;
         }
@@ -158,7 +151,7 @@ static int break_super_page(int xc_handle,
     start_pfn = next_pfn & ~(SUPERPAGE_NR_PFNS - 1);
     for ( i = start_pfn; i < start_pfn + SUPERPAGE_NR_PFNS; i++ )
     {
-        p2m[i] = INVALID_P2M_ENTRY;
+        ctx->p2m[i] = INVALID_P2M_ENTRY;
     }
 
     for ( i = start_pfn; i < start_pfn + tot_pfns; i++ )
@@ -172,7 +165,7 @@ static int break_super_page(int xc_handle,
             rc = 1;
             goto out;
         }
-        p2m[i] = mfn;
+        ctx->p2m[i] = mfn;
     }
 
     /* restore contents */
@@ -224,7 +217,7 @@ static int allocate_mfn_list(int xc_handle,
     sp_pfn = *next_pfn;
 
     if ( !superpages ||
-         no_superpage_mem ||
+         ctx->no_superpage_mem ||
          !SUPER_PAGE_TRACKING(sp_pfn) )
         goto normal_page;
 
@@ -269,13 +262,13 @@ static int allocate_mfn_list(int xc_handle,
     {
         for ( i = pfn; i < pfn + SUPERPAGE_NR_PFNS; i++, mfn++ )
         {
-            p2m[i] = mfn;
+            ctx->p2m[i] = mfn;
         }
         return 0;
     }
     DPRINTF("No 2M page available for pfn 0x%lx, fall back to 4K page.\n",
             pfn);
-    no_superpage_mem = 1;
+    ctx->no_superpage_mem = 1;
 
 normal_page:
     if ( !batch_buf )
@@ -291,7 +284,7 @@ normal_page:
             continue;
 
         pfn = mfn = batch_buf[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
-        if ( p2m[pfn] == INVALID_P2M_ENTRY )
+        if ( ctx->p2m[pfn] == INVALID_P2M_ENTRY )
         {
             if (xc_domain_memory_populate_physmap(xc_handle, dom, 1, 0,
                         0, &mfn) != 0)
@@ -301,7 +294,7 @@ normal_page:
                 errno = ENOMEM;
                 return 1;
             }
-            p2m[pfn] = mfn;
+            ctx->p2m[pfn] = mfn;
         }
     }
 
@@ -427,7 +420,7 @@ alloc_page:
         pfn      = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
         pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
 
-        if ( pfn > p2m_size )
+        if ( pfn > ctx->p2m_size )
         {
             ERROR("pfn out of range");
             return 1;
@@ -438,7 +431,7 @@ alloc_page:
         }
         else 
         {
-            if (p2m[pfn] == INVALID_P2M_ENTRY)
+            if (ctx->p2m[pfn] == INVALID_P2M_ENTRY)
             {
                 DPRINTF("Warning: pfn 0x%lx are not allocated!\n", pfn);
                 /*XXX:allocate this page?*/
@@ -446,7 +439,7 @@ alloc_page:
 
             /* setup region_mfn[] for batch map.
              * For HVM guests, this interface takes PFNs, not MFNs */
-            region_mfn[i] = hvm ? pfn : p2m[pfn]; 
+            region_mfn[i] = hvm ? pfn : ctx->p2m[pfn]; 
         }
     }
     return 0;
@@ -512,11 +505,11 @@ static int uncanonicalize_pagetable(int xc_handle, uint32_t dom,
     unsigned long pfn;
     uint64_t pte;
 
-    pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
+    pte_last = PAGE_SIZE / ((ctx->pt_levels == 2)? 4 : 8);
 
     for ( i = 0; i < pte_last; i++ )
     {
-        if ( pt_levels == 2 )
+        if ( ctx->pt_levels == 2 )
             pte = ((uint32_t *)page)[i];
         else
             pte = ((uint64_t *)page)[i];
@@ -525,20 +518,20 @@ static int uncanonicalize_pagetable(int xc_handle, uint32_t dom,
         if ( !(pte & _PAGE_PRESENT) )
             continue;
         
-        pfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86(guest_width);
+        pfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86(ctx->guest_width);
 
         /* Allocate mfn if necessary */
-        if ( p2m[pfn] == INVALID_P2M_ENTRY )
+        if ( ctx->p2m[pfn] == INVALID_P2M_ENTRY )
         {
             unsigned long force_pfn = superpages ? FORCE_SP_MASK : pfn;
             if (allocate_mfn_list(xc_handle, dom,
                         1, &pfn, &force_pfn, superpages) != 0)
                 return 0;
         }
-        pte &= ~MADDR_MASK_X86(guest_width);
-        pte |= (uint64_t)p2m[pfn] << PAGE_SHIFT;
+        pte &= ~MADDR_MASK_X86(ctx->guest_width);
+        pte |= (uint64_t)ctx->p2m[pfn] << PAGE_SHIFT;
 
-        if ( pt_levels == 2 )
+        if ( ctx->pt_levels == 2 )
             ((uint32_t *)page)[i] = (uint32_t)pte;
         else
             ((uint64_t *)page)[i] = (uint64_t)pte;
@@ -595,14 +588,14 @@ static xen_pfn_t *load_p2m_frame_list(
                 /* Pick a guest word-size and PT depth from the ctxt size */
                 if ( chunk_bytes == sizeof (ctxt.x32) )
                 {
-                    guest_width = 4;
-                    if ( pt_levels > 2 ) 
-                        pt_levels = 3; 
+                    ctx->guest_width = 4;
+                    if ( ctx->pt_levels > 2 ) 
+                        ctx->pt_levels = 3; 
                 }
                 else if ( chunk_bytes == sizeof (ctxt.x64) )
                 {
-                    guest_width = 8;
-                    pt_levels = 4;
+                    ctx->guest_width = 8;
+                    ctx->pt_levels = 4;
                 }
                 else 
                 {
@@ -618,7 +611,7 @@ static xen_pfn_t *load_p2m_frame_list(
                 tot_bytes -= chunk_bytes;
                 chunk_bytes = 0;
 
-                if ( GET_FIELD(guest_width, &ctxt, vm_assist) 
+                if ( GET_FIELD(ctx->guest_width, &ctxt, vm_assist) 
                      & (1UL << VMASST_TYPE_pae_extended_cr3) )
                     *pae_extended_cr3 = 1;
             }
@@ -651,7 +644,7 @@ static xen_pfn_t *load_p2m_frame_list(
 
     /* Now that we know the guest's word-size, can safely allocate 
      * the p2m frame list */
-    if ( (p2m_frame_list = malloc(P2M_TOOLS_FL_SIZE(p2m_size, guest_width))) == NULL )
+    if ( (p2m_frame_list = malloc(P2M_TOOLS_FL_SIZE(ctx->p2m_size, ctx->guest_width))) == NULL )
     {
         ERROR("Couldn't allocate p2m_frame_list array");
         return NULL;
@@ -660,7 +653,7 @@ static xen_pfn_t *load_p2m_frame_list(
     /* First entry has already been read. */
     p2m_frame_list[0] = p2m_fl_zero;
     if ( read_exact(io_fd, &p2m_frame_list[1], 
-                    (P2M_FL_ENTRIES(p2m_size, guest_width) - 1) * sizeof(xen_pfn_t)) )
+                    (P2M_FL_ENTRIES(ctx->p2m_size, ctx->guest_width) - 1) * sizeof(xen_pfn_t)) )
     {
         ERROR("read p2m_frame_list failed");
         return NULL;
@@ -902,7 +895,7 @@ static int buffer_tail_pv(struct tailbuf_pv *buf, int fd,
         buf->vcpucount++;
     }
     // DPRINTF("VCPU count: %d\n", buf->vcpucount);
-    vcpulen = ((guest_width == 8) ? sizeof(vcpu_guest_context_x86_64_t)
+    vcpulen = ((ctx->guest_width == 8) ? sizeof(vcpu_guest_context_x86_64_t)
                : sizeof(vcpu_guest_context_x86_32_t)) * buf->vcpucount;
     if ( ext_vcpucontext )
         vcpulen += 128 * buf->vcpucount;
@@ -1202,7 +1195,7 @@ static int apply_batch(int xc_handle, uint32_t dom, xen_pfn_t* region_mfn,
 
         ++curpage;
 
-        if ( pfn > p2m_size )
+        if ( pfn > ctx->p2m_size )
         {
             ERROR("pfn out of range");
             return -1;
@@ -1210,7 +1203,7 @@ static int apply_batch(int xc_handle, uint32_t dom, xen_pfn_t* region_mfn,
 
         pfn_type[pfn] = pagetype;
 
-        mfn = p2m[pfn];
+        mfn = ctx->p2m[pfn];
 
         /* In verify mode, we use a copy; otherwise we work in place */
         page = pagebuf->verify ? (void *)buf : (region_base + i*PAGE_SIZE);
@@ -1231,7 +1224,7 @@ static int apply_batch(int xc_handle, uint32_t dom, xen_pfn_t* region_mfn,
             ** so we may need to update the p2m after the main loop.
             ** Hence we defer canonicalization of L1s until then.
             */
-            if ((pt_levels != 3) ||
+            if ((ctx->pt_levels != 3) ||
                 pae_extended_cr3 ||
                 (pagetype != XEN_DOMCTL_PFINFO_L1TAB)) {
 
@@ -1252,7 +1245,7 @@ static int apply_batch(int xc_handle, uint32_t dom, xen_pfn_t* region_mfn,
         else if ( pagetype != XEN_DOMCTL_PFINFO_NOTAB )
         {
             ERROR("Bogus page type %lx page table is out of range: "
-                  "i=%d p2m_size=%lu", pagetype, i, p2m_size);
+                  "i=%d p2m_size=%lu", pagetype, i, ctx->p2m_size);
             return -1;
         }
 
@@ -1347,21 +1340,21 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
     tailbuf.ishvm = hvm;
 
     /* For info only */
-    nr_pfns = 0;
+    ctx->nr_pfns = 0;
 
     /* Always try to allocate 2M pages for HVM */
     if ( hvm )
         superpages = 1;
 
-    if ( read_exact(io_fd, &p2m_size, sizeof(unsigned long)) )
+    if ( read_exact(io_fd, &ctx->p2m_size, sizeof(unsigned long)) )
     {
         ERROR("read: p2m_size");
         goto out;
     }
-    DPRINTF("xc_domain_restore start: p2m_size = %lx\n", p2m_size);
+    DPRINTF("xc_domain_restore start: p2m_size = %lx\n", ctx->p2m_size);
 
     if ( !get_platform_info(xc_handle, dom,
-                            &max_mfn, &hvirt_start, &pt_levels, &guest_width) )
+                            &ctx->max_mfn, &ctx->hvirt_start, &ctx->pt_levels, &ctx->guest_width) )
     {
         ERROR("Unable to get platform info.");
         return 1;
@@ -1370,8 +1363,8 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
     /* The *current* word size of the guest isn't very interesting; for now
      * assume the guest will be the same as we are.  We'll fix that later
      * if we discover otherwise. */
-    guest_width = sizeof(unsigned long);
-    pt_levels = (guest_width == 8) ? 4 : (pt_levels == 2) ? 2 : 3; 
+    ctx->guest_width = sizeof(unsigned long);
+    ctx->pt_levels = (ctx->guest_width == 8) ? 4 : (ctx->pt_levels == 2) ? 2 : 3;
     
     if ( !hvm ) 
     {
@@ -1385,7 +1378,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
         memset(&domctl, 0, sizeof(domctl));
         domctl.domain = dom;
         domctl.cmd    = XEN_DOMCTL_set_address_size;
-        domctl.u.address_size.size = guest_width * 8;
+        domctl.u.address_size.size = ctx->guest_width * 8;
         frc = do_domctl(xc_handle, &domctl);
         if ( frc != 0 )
         {
@@ -1395,13 +1388,13 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
     }
 
     /* We want zeroed memory so use calloc rather than malloc. */
-    p2m        = calloc(p2m_size, sizeof(xen_pfn_t));
-    pfn_type   = calloc(p2m_size, sizeof(unsigned long));
+    ctx->p2m        = calloc(ctx->p2m_size, sizeof(xen_pfn_t));
+    pfn_type   = calloc(ctx->p2m_size, sizeof(unsigned long));
 
     region_mfn = xg_memalign(PAGE_SIZE, ROUNDUP(
                               MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT));
 
-    if ( (p2m == NULL) || (pfn_type == NULL) ||
+    if ( (ctx->p2m == NULL) || (pfn_type == NULL) ||
          (region_mfn == NULL) )
     {
         ERROR("memory alloc failed");
@@ -1429,8 +1422,8 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
     shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;
 
     /* Mark all PFNs as invalid; we allocate on demand */
-    for ( pfn = 0; pfn < p2m_size; pfn++ )
-        p2m[pfn] = INVALID_P2M_ENTRY;
+    for ( pfn = 0; pfn < ctx->p2m_size; pfn++ )
+        ctx->p2m[pfn] = INVALID_P2M_ENTRY;
 
     mmu = xc_alloc_mmu_updates(xc_handle, dom);
     if ( mmu == NULL )
@@ -1453,7 +1446,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
     {
         int j, curbatch;
 
-        this_pc = (n * 100) / p2m_size;
+        this_pc = (n * 100) / ctx->p2m_size;
         if ( (this_pc - prev_pc) >= 5 )
         {
             PPRINTF("\b\b\b\b%3d%%", this_pc);
@@ -1565,7 +1558,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
     if ( hvm )
         goto finish_hvm;
 
-    if ( (pt_levels == 3) && !pae_extended_cr3 )
+    if ( (ctx->pt_levels == 3) && !pae_extended_cr3 )
     {
         /*
         ** XXX SMH on PAE we need to ensure PGDs are in MFNs < 4G. This
@@ -1582,11 +1575,11 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
         int j, k;
         
         /* First pass: find all L3TABs current in > 4G mfns and get new mfns */
-        for ( i = 0; i < p2m_size; i++ )
+        for ( i = 0; i < ctx->p2m_size; i++ )
         {
             if ( ((pfn_type[i] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) ==
                   XEN_DOMCTL_PFINFO_L3TAB) &&
-                 (p2m[i] > 0xfffffUL) )
+                 (ctx->p2m[i] > 0xfffffUL) )
             {
                 unsigned long new_mfn;
                 uint64_t l3ptes[4];
@@ -1594,21 +1587,21 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
 
                 l3tab = (uint64_t *)
                     xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
-                                         PROT_READ, p2m[i]);
+                                         PROT_READ, ctx->p2m[i]);
 
                 for ( j = 0; j < 4; j++ )
                     l3ptes[j] = l3tab[j];
 
                 munmap(l3tab, PAGE_SIZE);
 
-                new_mfn = xc_make_page_below_4G(xc_handle, dom, p2m[i]);
+                new_mfn = xc_make_page_below_4G(xc_handle, dom, ctx->p2m[i]);
                 if ( !new_mfn )
                 {
                     ERROR("Couldn't get a page below 4GB :-(");
                     goto out;
                 }
 
-                p2m[i] = new_mfn;
+                ctx->p2m[i] = new_mfn;
                 if ( xc_add_mmu_update(xc_handle, mmu,
                                        (((unsigned long long)new_mfn)
                                         << PAGE_SHIFT) |
@@ -1620,7 +1613,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
 
                 l3tab = (uint64_t *)
                     xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
-                                         PROT_READ | PROT_WRITE, p2m[i]);
+                                         PROT_READ | PROT_WRITE, ctx->p2m[i]);
 
                 for ( j = 0; j < 4; j++ )
                     l3tab[j] = l3ptes[j];
@@ -1632,16 +1625,16 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
         /* Second pass: find all L1TABs and uncanonicalize them */
         j = 0;
 
-        for ( i = 0; i < p2m_size; i++ )
+        for ( i = 0; i < ctx->p2m_size; i++ )
         {
             if ( ((pfn_type[i] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) ==
                   XEN_DOMCTL_PFINFO_L1TAB) )
             {
-                region_mfn[j] = p2m[i];
+                region_mfn[j] = ctx->p2m[i];
                 j++;
             }
 
-            if ( (i == (p2m_size-1)) || (j == MAX_BATCH_SIZE) )
+            if ( (i == (ctx->p2m_size-1)) || (j == MAX_BATCH_SIZE) )
             {
                 region_base = xc_map_foreign_batch(
                     xc_handle, dom, PROT_READ | PROT_WRITE, region_mfn, j);
@@ -1679,7 +1672,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
      * will barf when doing the type-checking.
      */
     nr_pins = 0;
-    for ( i = 0; i < p2m_size; i++ )
+    for ( i = 0; i < ctx->p2m_size; i++ )
     {
         if ( (pfn_type[i] & XEN_DOMCTL_PFINFO_LPINTAB) == 0 )
             continue;
@@ -1706,7 +1699,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
             continue;
         }
 
-        pin[nr_pins].arg1.mfn = p2m[i];
+        pin[nr_pins].arg1.mfn = ctx->p2m[i];
         nr_pins++;
 
         /* Batch full? Then flush. */
@@ -1729,7 +1722,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
     }
 
     DPRINTF("\b\b\b\b100%%\n");
-    DPRINTF("Memory reloaded (%ld pages)\n", nr_pfns);
+    DPRINTF("Memory reloaded (%ld pages)\n", ctx->nr_pfns);
 
     /* Get the list of PFNs that are not in the psuedo-phys map */
     {
@@ -1739,12 +1732,12 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
         {
             unsigned long pfn = tailbuf.u.pv.pfntab[i];
 
-            if ( p2m[pfn] != INVALID_P2M_ENTRY )
+            if ( ctx->p2m[pfn] != INVALID_P2M_ENTRY )
             {
                 /* pfn is not in physmap now, but was at some point during
                    the save/migration process - need to free it */
-                tailbuf.u.pv.pfntab[nr_frees++] = p2m[pfn];
-                p2m[pfn]  = INVALID_P2M_ENTRY; /* not in pseudo-physical map */
+                tailbuf.u.pv.pfntab[nr_frees++] = ctx->p2m[pfn];
+                ctx->p2m[pfn]  = INVALID_P2M_ENTRY; /* not in pseudo-physical map */
             }
         }
 
@@ -1780,14 +1773,14 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
         if ( !(vcpumap & (1ULL << i)) )
             continue;
 
-        memcpy(&ctxt, vcpup, ((guest_width == 8) ? sizeof(ctxt.x64)
+        memcpy(&ctxt, vcpup, ((ctx->guest_width == 8) ? sizeof(ctxt.x64)
                               : sizeof(ctxt.x32)));
-        vcpup += (guest_width == 8) ? sizeof(ctxt.x64) : sizeof(ctxt.x32);
+        vcpup += (ctx->guest_width == 8) ? sizeof(ctxt.x64) : sizeof(ctxt.x32);
 
         DPRINTF("read VCPU %d\n", i);
 
         if ( !new_ctxt_format )
-            SET_FIELD(guest_width, &ctxt, flags, GET_FIELD(guest_width, &ctxt, flags) | VGCF_online);
+            SET_FIELD(ctx->guest_width, &ctxt, flags, GET_FIELD(ctx->guest_width, &ctxt, flags) | VGCF_online);
 
         if ( i == 0 )
         {
@@ -1795,86 +1788,86 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
              * Uncanonicalise the suspend-record frame number and poke
              * resume record.
              */
-            pfn = GET_FIELD(guest_width, &ctxt, user_regs.edx);
-            if ( (pfn >= p2m_size) ||
+            pfn = GET_FIELD(ctx->guest_width, &ctxt, user_regs.edx);
+            if ( (pfn >= ctx->p2m_size) ||
                  (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
             {
                 ERROR("Suspend record frame number is bad");
                 goto out;
             }
-            mfn = p2m[pfn];
-            SET_FIELD(guest_width, &ctxt, user_regs.edx, mfn);
+            mfn = ctx->p2m[pfn];
+            SET_FIELD(ctx->guest_width, &ctxt, user_regs.edx, mfn);
             start_info = xc_map_foreign_range(
                 xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE, mfn);
-            SET_FIELD(guest_width, start_info, nr_pages, p2m_size);
-            SET_FIELD(guest_width, start_info, shared_info, shared_info_frame<<PAGE_SHIFT);
-            SET_FIELD(guest_width, start_info, flags, 0);
-            *store_mfn = p2m[GET_FIELD(guest_width, start_info, store_mfn)];
-            SET_FIELD(guest_width, start_info, store_mfn, *store_mfn);
-            SET_FIELD(guest_width, start_info, store_evtchn, store_evtchn);
-            *console_mfn = p2m[GET_FIELD(guest_width, start_info, console.domU.mfn)];
-            SET_FIELD(guest_width, start_info, console.domU.mfn, *console_mfn);
-            SET_FIELD(guest_width, start_info, console.domU.evtchn, console_evtchn);
+            SET_FIELD(ctx->guest_width, start_info, nr_pages, ctx->p2m_size);
+            SET_FIELD(ctx->guest_width, start_info, shared_info, shared_info_frame<<PAGE_SHIFT);
+            SET_FIELD(ctx->guest_width, start_info, flags, 0);
+            *store_mfn = ctx->p2m[GET_FIELD(ctx->guest_width, start_info, store_mfn)];
+            SET_FIELD(ctx->guest_width, start_info, store_mfn, *store_mfn);
+            SET_FIELD(ctx->guest_width, start_info, store_evtchn, store_evtchn);
+            *console_mfn = ctx->p2m[GET_FIELD(ctx->guest_width, start_info, console.domU.mfn)];
+            SET_FIELD(ctx->guest_width, start_info, console.domU.mfn, *console_mfn);
+            SET_FIELD(ctx->guest_width, start_info, console.domU.evtchn, console_evtchn);
             munmap(start_info, PAGE_SIZE);
         }
         /* Uncanonicalise each GDT frame number. */
-        if ( GET_FIELD(guest_width, &ctxt, gdt_ents) > 8192 )
+        if ( GET_FIELD(ctx->guest_width, &ctxt, gdt_ents) > 8192 )
         {
             ERROR("GDT entry count out of range");
             goto out;
         }
 
-        for ( j = 0; (512*j) < GET_FIELD(guest_width, &ctxt, gdt_ents); j++ )
+        for ( j = 0; (512*j) < GET_FIELD(ctx->guest_width, &ctxt, gdt_ents); j++ )
         {
-            pfn = GET_FIELD(guest_width, &ctxt, gdt_frames[j]);
-            if ( (pfn >= p2m_size) ||
+            pfn = GET_FIELD(ctx->guest_width, &ctxt, gdt_frames[j]);
+            if ( (pfn >= ctx->p2m_size) ||
                  (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
             {
                 ERROR("GDT frame number %i (0x%lx) is bad", 
                       j, (unsigned long)pfn);
                 goto out;
             }
-            SET_FIELD(guest_width, &ctxt, gdt_frames[j], p2m[pfn]);
+            SET_FIELD(ctx->guest_width, &ctxt, gdt_frames[j], ctx->p2m[pfn]);
         }
         /* Uncanonicalise the page table base pointer. */
-        pfn = UNFOLD_CR3(guest_width, GET_FIELD(guest_width, &ctxt, ctrlreg[3]));
+        pfn = UNFOLD_CR3(ctx->guest_width, GET_FIELD(ctx->guest_width, &ctxt, ctrlreg[3]));
 
-        if ( pfn >= p2m_size )
+        if ( pfn >= ctx->p2m_size )
         {
             ERROR("PT base is bad: pfn=%lu p2m_size=%lu type=%08lx",
-                  pfn, p2m_size, pfn_type[pfn]);
+                  pfn, ctx->p2m_size, pfn_type[pfn]);
             goto out;
         }
 
         if ( (pfn_type[pfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
-             ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
+             ((unsigned long)ctx->pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
         {
             ERROR("PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx",
-                  pfn, p2m_size, pfn_type[pfn],
-                  (unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
+                  pfn, ctx->p2m_size, pfn_type[pfn],
+                  (unsigned long)ctx->pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
             goto out;
         }
-        SET_FIELD(guest_width, &ctxt, ctrlreg[3], FOLD_CR3(guest_width, p2m[pfn]));
+        SET_FIELD(ctx->guest_width, &ctxt, ctrlreg[3], FOLD_CR3(ctx->guest_width, ctx->p2m[pfn]));
 
         /* Guest pagetable (x86/64) stored in otherwise-unused CR1. */
-        if ( (pt_levels == 4) && (ctxt.x64.ctrlreg[1] & 1) )
+        if ( (ctx->pt_levels == 4) && (ctxt.x64.ctrlreg[1] & 1) )
         {
-            pfn = UNFOLD_CR3(guest_width, ctxt.x64.ctrlreg[1] & ~1);
-            if ( pfn >= p2m_size )
+            pfn = UNFOLD_CR3(ctx->guest_width, ctxt.x64.ctrlreg[1] & ~1);
+            if ( pfn >= ctx->p2m_size )
             {
                 ERROR("User PT base is bad: pfn=%lu p2m_size=%lu",
-                      pfn, p2m_size);
+                      pfn, ctx->p2m_size);
                 goto out;
             }
             if ( (pfn_type[pfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
-                 ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
+                 ((unsigned long)ctx->pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
             {
                 ERROR("User PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx",
-                      pfn, p2m_size, pfn_type[pfn],
-                      (unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
+                      pfn, ctx->p2m_size, pfn_type[pfn],
+                      (unsigned long)ctx->pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
                 goto out;
             }
-            ctxt.x64.ctrlreg[1] = FOLD_CR3(guest_width, p2m[pfn]);
+            ctxt.x64.ctrlreg[1] = FOLD_CR3(ctx->guest_width, ctx->p2m[pfn]);
         }
         domctl.cmd = XEN_DOMCTL_setvcpucontext;
         domctl.domain = (domid_t)dom;
@@ -1910,35 +1903,35 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
         xc_handle, dom, PAGE_SIZE, PROT_WRITE, shared_info_frame);
 
     /* restore saved vcpu_info and arch specific info */
-    MEMCPY_FIELD(guest_width, new_shared_info, old_shared_info, vcpu_info);
-    MEMCPY_FIELD(guest_width, new_shared_info, old_shared_info, arch);
+    MEMCPY_FIELD(ctx->guest_width, new_shared_info, old_shared_info, vcpu_info);
+    MEMCPY_FIELD(ctx->guest_width, new_shared_info, old_shared_info, arch);
 
     /* clear any pending events and the selector */
-    MEMSET_ARRAY_FIELD(guest_width, new_shared_info, evtchn_pending, 0);
+    MEMSET_ARRAY_FIELD(ctx->guest_width, new_shared_info, evtchn_pending, 0);
     for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
-           SET_FIELD(guest_width, new_shared_info, vcpu_info[i].evtchn_pending_sel, 0);
+           SET_FIELD(ctx->guest_width, new_shared_info, vcpu_info[i].evtchn_pending_sel, 0);
 
     /* mask event channels */
-    MEMSET_ARRAY_FIELD(guest_width, new_shared_info, evtchn_mask, 0xff);
+    MEMSET_ARRAY_FIELD(ctx->guest_width, new_shared_info, evtchn_mask, 0xff);
 
     /* leave wallclock time. set by hypervisor */
     munmap(new_shared_info, PAGE_SIZE);
 
     /* Uncanonicalise the pfn-to-mfn table frame-number list. */
-    for ( i = 0; i < P2M_FL_ENTRIES(p2m_size, guest_width); i++ )
+    for ( i = 0; i < P2M_FL_ENTRIES(ctx->p2m_size, ctx->guest_width); i++ )
     {
         pfn = p2m_frame_list[i];
-        if ( (pfn >= p2m_size) || (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
+        if ( (pfn >= ctx->p2m_size) || (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
         {
             ERROR("PFN-to-MFN frame number %i (%#lx) is bad", i, pfn);
             goto out;
         }
-        p2m_frame_list[i] = p2m[pfn];
+        p2m_frame_list[i] = ctx->p2m[pfn];
     }
 
     /* Copy the P2M we've constructed to the 'live' P2M */
-    if ( !(live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_WRITE,
-                                           p2m_frame_list, P2M_FL_ENTRIES(p2m_size, guest_width))) )
+    if ( !(ctx->live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_WRITE,
+                                           p2m_frame_list, P2M_FL_ENTRIES(ctx->p2m_size, ctx->guest_width))) )
     {
         ERROR("Couldn't map p2m table");
         goto out;
@@ -1946,15 +1939,15 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
 
     /* If the domain we're restoring has a different word size to ours,
      * we need to adjust the live_p2m assignment appropriately */
-    if ( guest_width > sizeof (xen_pfn_t) )
-        for ( i = p2m_size - 1; i >= 0; i-- )
-            ((int64_t *)live_p2m)[i] = (long)p2m[i];
-    else if ( guest_width < sizeof (xen_pfn_t) )
-        for ( i = 0; i < p2m_size; i++ )   
-            ((uint32_t *)live_p2m)[i] = p2m[i];
+    if ( ctx->guest_width > sizeof (xen_pfn_t) )
+        for ( i = ctx->p2m_size - 1; i >= 0; i-- )
+            ((int64_t *)ctx->live_p2m)[i] = (long)ctx->p2m[i];
+    else if ( ctx->guest_width < sizeof (xen_pfn_t) )
+        for ( i = 0; i < ctx->p2m_size; i++ )   
+            ((uint32_t *)ctx->live_p2m)[i] = ctx->p2m[i];
     else
-        memcpy(live_p2m, p2m, p2m_size * sizeof(xen_pfn_t));
-    munmap(live_p2m, P2M_FL_ENTRIES(p2m_size, guest_width) * PAGE_SIZE);
+        memcpy(ctx->live_p2m, ctx->p2m, ctx->p2m_size * sizeof(xen_pfn_t));
+    munmap(ctx->live_p2m, P2M_FL_ENTRIES(ctx->p2m_size, ctx->guest_width) * PAGE_SIZE);
 
     DPRINTF("Domain ready to be built.\n");
     rc = 0;
@@ -2008,7 +2001,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
     if ( (rc != 0) && (dom != 0) )
         xc_domain_destroy(xc_handle, dom);
     free(mmu);
-    free(p2m);
+    free(ctx->p2m);
     free(pfn_type);
     tailbuf_free(&tailbuf);
 
-- 
1.6.5.2


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
