
[Xen-changelog] [xen master] tools: libxc: restore: copy the correct page to memory



commit 035a72faf29eb691542f3fdb7cb347b323c72800
Author:     Hong Tao <bobby.hong@xxxxxxxxxx>
AuthorDate: Mon Sep 22 13:59:13 2014 +0800
Commit:     Ian Campbell <ian.campbell@xxxxxxxxxx>
CommitDate: Mon Sep 22 15:18:15 2014 +0100

    tools: libxc: restore: copy the correct page to memory
    
    apply_batch() handles at most MAX_BATCH_SIZE pages at a time. Any
    bogus/unmapped/allocate-only/broken pages in a batch are skipped and
    have no data in pagebuf->pages. So when apply_batch() is called again,
    the index of the first page's data is curbatch - invalid_pages, where
    invalid_pages is the number of bogus/unmapped/allocate-only/broken
    pages found so far.
    
    In many cases invalid_pages is 0, which is why this error usually goes
    unnoticed.
    
    Signed-off-by: Hong Tao <bobby.hong@xxxxxxxxxx>
    Signed-off-by: Wen Congyang <wency@xxxxxxxxxxxxxx>
    Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 tools/libxc/xc_domain_restore.c |   24 +++++++++++++++++++-----
 1 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
index b9a56d5..bec716c 100644
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -1106,7 +1106,7 @@ static int pagebuf_get(xc_interface *xch, struct restore_ctx *ctx,
 static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
                        xen_pfn_t* region_mfn, unsigned long* pfn_type, int pae_extended_cr3,
                        struct xc_mmu* mmu,
-                       pagebuf_t* pagebuf, int curbatch)
+                       pagebuf_t* pagebuf, int curbatch, int *invalid_pages)
 {
     int i, j, curpage, nr_mfns;
     int k, scount;
@@ -1121,6 +1121,12 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
     struct domain_info_context *dinfo = &ctx->dinfo;
     int* pfn_err = NULL;
     int rc = -1;
+    int local_invalid_pages = 0;
+    /* We have handled curbatch pages before this batch, and *invalid_pages
+     * of them have no data in pagebuf->pages. So the data for the first
+     * page of this batch is at index (curbatch - *invalid_pages).
+     */
+    int first_page = curbatch - *invalid_pages;
 
     unsigned long mfn, pfn, pagetype;
 
@@ -1293,10 +1299,13 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
         pfn      = pagebuf->pfn_types[i + curbatch] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
         pagetype = pagebuf->pfn_types[i + curbatch] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
 
-        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB 
+        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB
              || pagetype == XEN_DOMCTL_PFINFO_XALLOC)
+        {
+            local_invalid_pages++;
             /* a bogus/unmapped/allocate-only page: skip it */
             continue;
+        }
 
         if ( pagetype == XEN_DOMCTL_PFINFO_BROKEN )
         {
@@ -1306,6 +1315,8 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
                       "dom=%d, pfn=%lx\n", dom, pfn);
                 goto err_mapped;
             }
+
+            local_invalid_pages++;
             continue;
         }
 
@@ -1344,7 +1355,7 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
             }
         }
         else
-            memcpy(page, pagebuf->pages + (curpage + curbatch) * PAGE_SIZE,
+            memcpy(page, pagebuf->pages + (first_page + curpage) * PAGE_SIZE,
                    PAGE_SIZE);
 
         pagetype &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
@@ -1418,6 +1429,7 @@ static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
     } /* end of 'batch' for loop */
 
     rc = nraces;
+    *invalid_pages += local_invalid_pages;
 
   err_mapped:
     munmap(region_base, j*PAGE_SIZE);
@@ -1621,7 +1633,7 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
  loadpages:
     for ( ; ; )
     {
-        int j, curbatch;
+        int j, curbatch, invalid_pages;
 
         xc_report_progress_step(xch, n, dinfo->p2m_size);
 
@@ -1665,11 +1677,13 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
 
         /* break pagebuf into batches */
         curbatch = 0;
+        invalid_pages = 0;
         while ( curbatch < j ) {
             int brc;
 
             brc = apply_batch(xch, dom, ctx, region_mfn, pfn_type,
-                              pae_extended_cr3, mmu, &pagebuf, curbatch);
+                              pae_extended_cr3, mmu, &pagebuf, curbatch,
+                              &invalid_pages);
             if ( brc < 0 )
                 goto out;
 
--
generated by git-patchbot for /home/xen/git/xen.git#master
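
For illustration, here is a minimal standalone sketch of the indexing that
the patch corrects. It is not part of the patch or of libxc; the array
names, sizes and the main() driver are invented for this example. In the
real code the per-pfn types live in pagebuf->pfn_types (one entry per pfn,
including the invalid ones), while data for the skipped XTAB/XALLOC/BROKEN
pages is never sent, so pagebuf->pages is packed with only the valid pages.

/*
 * Standalone model of the batch indexing fixed by this patch.  invalid[]
 * stands in for the per-pfn type information in pagebuf->pfn_types; the
 * data array it guards is packed with valid pages only, because data for
 * bogus/unmapped/allocate-only/broken pages is never transmitted.  All
 * names and sizes here are made up for illustration.
 */
#include <stdio.h>

#define NR_PFNS    10           /* hypothetical total number of pfns  */
#define BATCH_SIZE  4           /* stands in for the real batch size  */

/* 1 = bogus/unmapped/allocate-only/broken page (no data in the stream) */
static const int invalid[NR_PFNS] = { 0, 1, 0, 0, 1, 0, 0, 0, 1, 0 };

int main(void)
{
    int invalid_pages = 0;      /* accumulated across batches, as in the patch */

    for (int curbatch = 0; curbatch < NR_PFNS; curbatch += BATCH_SIZE)
    {
        /* What the patch computes at the top of apply_batch(). */
        int first_page = curbatch - invalid_pages;
        int curpage = 0;        /* valid pages seen so far in this batch */

        for (int i = 0; i < BATCH_SIZE && curbatch + i < NR_PFNS; i++)
        {
            if (invalid[curbatch + i])
            {
                invalid_pages++;                   /* local_invalid_pages in the patch */
                continue;
            }

            int old_index = curpage + curbatch;    /* index used before the patch */
            int new_index = first_page + curpage;  /* index used after the patch  */

            printf("pfn slot %2d: data is in pages[%d], old code read pages[%d]\n",
                   curbatch + i, new_index, old_index);
            curpage++;
        }
    }
    return 0;
}

With the sample layout above, the first batch happens to work because
invalid_pages is still 0, but from the second batch onward the old index
(curpage + curbatch) points invalid_pages slots past the data that actually
belongs to the pfn, which is why the bug only shows up once an invalid page
has been seen.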

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

