
[PATCH v20210713 16/31] tools: restore: preallocate mfns array



Remove the repeated allocation from the migration loop. There will never
be more than MAX_BATCH_SIZE pages to process in an incoming batch, so
allocate the space once during setup and release it during cleanup.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
---
 tools/libs/guest/xg_sr_common.h  |  1 +
 tools/libs/guest/xg_sr_restore.c | 16 ++++++++--------
 2 files changed, 9 insertions(+), 8 deletions(-)
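Note (illustration only): the pattern is small enough that a stand-alone
sketch may be clearer than the diff alone. Everything below (demo_ctx,
DEMO_MAX_BATCH, the demo_* functions) is invented for this example and is
not the xg_sr_* code; it only shows the idea of the patch: size the
scratch array for the largest possible batch once in setup, reuse it for
every batch, and free it once in cleanup.

/*
 * Hypothetical names throughout; DEMO_MAX_BATCH stands in for
 * MAX_BATCH_SIZE, demo_ctx for the restore context.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_MAX_BATCH 1024U          /* stand-in for MAX_BATCH_SIZE */

struct demo_ctx {
    uint64_t *mfns;                   /* scratch space, allocated once */
};

static int demo_setup(struct demo_ctx *ctx)
{
    ctx->mfns = malloc(DEMO_MAX_BATCH * sizeof(*ctx->mfns));
    return ctx->mfns ? 0 : -1;
}

/* Called once per incoming batch; no per-call malloc()/free() any more. */
static int demo_process_batch(struct demo_ctx *ctx,
                              const uint64_t *pfns, unsigned int count)
{
    unsigned int i, nr = 0;

    if ( count > DEMO_MAX_BATCH )
        return -1;                    /* the stream never exceeds the batch size */

    for ( i = 0; i < count; i++ )
        ctx->mfns[nr++] = pfns[i] + 0x1000;   /* fake pfn -> mfn translation */

    printf("processed %u pages\n", nr);
    return 0;
}

static void demo_cleanup(struct demo_ctx *ctx)
{
    free(ctx->mfns);
}

int main(void)
{
    struct demo_ctx ctx = { 0 };
    uint64_t pfns[4] = { 1, 2, 3, 4 };

    if ( demo_setup(&ctx) )
        return 1;
    demo_process_batch(&ctx, pfns, 4);    /* same buffer reused per batch */
    demo_process_batch(&ctx, pfns, 2);
    demo_cleanup(&ctx);
    return 0;
}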

diff --git a/tools/libs/guest/xg_sr_common.h b/tools/libs/guest/xg_sr_common.h
index bd2027ba5d..11e45dbdc5 100644
--- a/tools/libs/guest/xg_sr_common.h
+++ b/tools/libs/guest/xg_sr_common.h
@@ -258,6 +258,7 @@ struct xc_sr_context
             struct restore_callbacks *callbacks;
             xen_pfn_t *pfns;
             uint32_t *types;
+            xen_pfn_t *mfns;
 
             int send_back_fd;
             unsigned long p2m_size;
diff --git a/tools/libs/guest/xg_sr_restore.c b/tools/libs/guest/xg_sr_restore.c
index bbb55b6844..24cd8c6a07 100644
--- a/tools/libs/guest/xg_sr_restore.c
+++ b/tools/libs/guest/xg_sr_restore.c
@@ -204,7 +204,6 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
                              xen_pfn_t *pfns, uint32_t *types, void *page_data)
 {
     xc_interface *xch = ctx->xch;
-    xen_pfn_t *mfns = malloc(count * sizeof(*mfns));
     int *map_errs = malloc(count * sizeof(*map_errs));
     int rc;
     void *mapping = NULL, *guest_page = NULL;
@@ -212,11 +211,11 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
         j,          /* j indexes the subset of pfns we decide to map. */
         nr_pages = 0;
 
-    if ( !mfns || !map_errs )
+    if ( !map_errs )
     {
         rc = -1;
         ERROR("Failed to allocate %zu bytes to process page data",
-              count * (sizeof(*mfns) + sizeof(*map_errs)));
+              count * sizeof(*map_errs));
         goto err;
     }
 
@@ -232,7 +231,7 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
         ctx->restore.ops.set_page_type(ctx, pfns[i], types[i]);
 
         if ( page_type_has_stream_data(types[i]) )
-            mfns[nr_pages++] = ctx->restore.ops.pfn_to_gfn(ctx, pfns[i]);
+            ctx->restore.mfns[nr_pages++] = ctx->restore.ops.pfn_to_gfn(ctx, pfns[i]);
     }
 
     /* Nothing to do? */
@@ -241,7 +240,7 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
 
     mapping = guest_page = xenforeignmemory_map(
         xch->fmem, ctx->domid, PROT_READ | PROT_WRITE,
-        nr_pages, mfns, map_errs);
+        nr_pages, ctx->restore.mfns, map_errs);
     if ( !mapping )
     {
         rc = -1;
@@ -259,7 +258,7 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
         {
             rc = -1;
             ERROR("Mapping pfn %#"PRIpfn" (mfn %#"PRIpfn", type %#"PRIx32") 
failed with %d",
-                  pfns[i], mfns[j], types[i], map_errs[j]);
+                  pfns[i], ctx->restore.mfns[j], types[i], map_errs[j]);
             goto err;
         }
 
@@ -298,7 +297,6 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned int count,
         xenforeignmemory_unmap(xch->fmem, mapping, nr_pages);
 
     free(map_errs);
-    free(mfns);
 
     return rc;
 }
@@ -718,7 +716,8 @@ static int setup(struct xc_sr_context *ctx)
 
     ctx->restore.pfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.pfns));
     ctx->restore.types = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.types));
-    if ( !ctx->restore.pfns  || !ctx->restore.types )
+    ctx->restore.mfns = malloc(MAX_BATCH_SIZE * sizeof(*ctx->restore.mfns));
+    if ( !ctx->restore.pfns || !ctx->restore.types || !ctx->restore.mfns )
     {
         ERROR("Unable to allocate memory");
         rc = -1;
@@ -755,6 +754,7 @@ static void cleanup(struct xc_sr_context *ctx)
 
     free(ctx->restore.buffered_records);
     free(ctx->restore.populated_pfns);
+    free(ctx->restore.mfns);
     free(ctx->restore.types);
     free(ctx->restore.pfns);
 