
[Xen-devel] [PATCH RFC 05/20] libxc/xc_sr: factor out filter_pages()



When processing a PAGE_DATA record, the restore side needs to set the
types of the incoming pages using the appropriate restore op, and to
filter the list of pfns in the record down to the subset that are
'backed' - i.e. accompanied by real backing data in the stream that
needs to be filled in.

Both of these steps are also required when processing postcopy records,
so factor them out into common helpers.

No functional change.

Signed-off-by: Joshua Otto <jtotto@xxxxxxxxxxxx>
---
 tools/libxc/xc_sr_restore.c | 100 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 75 insertions(+), 25 deletions(-)
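
[Note after the fold, not part of the commit: purely as illustration of
the intended reuse, a hypothetical postcopy record handler - the name
and the surrounding logic below are invented for this sketch - could
drive the two new helpers along these lines:

    /*
     * Hypothetical sketch only: reusing set_page_types() and
     * filter_pages() from a postcopy record handler.
     */
    static int handle_postcopy_pages_sketch(struct xc_sr_context *ctx,
                                            unsigned count, xen_pfn_t *pfns,
                                            uint32_t *types)
    {
        xen_pfn_t *bpfns = NULL;
        unsigned i, nr_pages = 0;
        int rc;

        /* Record the type of every pfn in the batch... */
        set_page_types(ctx, count, pfns, types);

        /* ...then narrow the batch to the pfns with real backing data. */
        rc = filter_pages(ctx, count, pfns, types, &nr_pages, &bpfns);
        if ( rc )
            goto out;

        /* Translate the backed pfns to gfns in place, as
         * process_page_data() does after calling filter_pages(). */
        for ( i = 0; i < nr_pages; ++i )
            bpfns[i] = ctx->restore.ops.pfn_to_gfn(ctx, bpfns[i]);

        /* ... mapping and filling of the backed pages would go here ... */

     out:
        free(bpfns); /* safe even on failure, per filter_pages()'s contract */
        return rc;
    }
]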

diff --git a/tools/libxc/xc_sr_restore.c b/tools/libxc/xc_sr_restore.c
index 481a904..8574ee8 100644
--- a/tools/libxc/xc_sr_restore.c
+++ b/tools/libxc/xc_sr_restore.c
@@ -194,6 +194,68 @@ int populate_pfns(struct xc_sr_context *ctx, unsigned count,
     return rc;
 }
 
+static void set_page_types(struct xc_sr_context *ctx, unsigned count,
+                           xen_pfn_t *pfns, uint32_t *types)
+{
+    unsigned i;
+
+    for ( i = 0; i < count; ++i )
+        ctx->restore.ops.set_page_type(ctx, pfns[i], types[i]);
+}
+
+/*
+ * Given count pfns and their types, allocate and fill in buffer bpfns with only
+ * those pfns that are 'backed' by real page data that needs to be migrated.
+ * The caller must later free() *bpfns.
+ *
+ * Returns 0 on success and non-0 on failure.  *bpfns can be free()ed even after
+ * failure.
+ */
+static int filter_pages(struct xc_sr_context *ctx,
+                        unsigned count,
+                        xen_pfn_t *pfns,
+                        uint32_t *types,
+                        /* OUT */ unsigned *nr_pages,
+                        /* OUT */ xen_pfn_t **bpfns)
+{
+    xc_interface *xch = ctx->xch;
+    unsigned i;
+
+    *nr_pages = 0;
+    *bpfns = malloc(count * sizeof(**bpfns));
+    if ( !(*bpfns) )
+    {
+        ERROR("Failed to allocate %zu bytes to process page data",
+              count * sizeof(**bpfns));
+        return -1;
+    }
+
+    for ( i = 0; i < count; ++i )
+    {
+        switch ( types[i] )
+        {
+        case XEN_DOMCTL_PFINFO_NOTAB:
+
+        case XEN_DOMCTL_PFINFO_L1TAB:
+        case XEN_DOMCTL_PFINFO_L1TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+
+        case XEN_DOMCTL_PFINFO_L2TAB:
+        case XEN_DOMCTL_PFINFO_L2TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+
+        case XEN_DOMCTL_PFINFO_L3TAB:
+        case XEN_DOMCTL_PFINFO_L3TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+
+        case XEN_DOMCTL_PFINFO_L4TAB:
+        case XEN_DOMCTL_PFINFO_L4TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+
+            (*bpfns)[(*nr_pages)++] = pfns[i];
+            break;
+        }
+    }
+
+    return 0;
+}
+
 /*
  * Given a list of pfns, their types, and a block of page data from the
  * stream, populate and record their types, map the relevant subset and copy
@@ -203,7 +265,7 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned count,
                              xen_pfn_t *pfns, uint32_t *types, void *page_data)
 {
     xc_interface *xch = ctx->xch;
-    xen_pfn_t *mfns = malloc(count * sizeof(*mfns));
+    xen_pfn_t *mfns = NULL;
     int *map_errs = malloc(count * sizeof(*map_errs));
     int rc;
     void *mapping = NULL, *guest_page = NULL;
@@ -211,11 +273,11 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned count,
         j,         /* j indexes the subset of pfns we decide to map. */
         nr_pages = 0;
 
-    if ( !mfns || !map_errs )
+    if ( !map_errs )
     {
         rc = -1;
         ERROR("Failed to allocate %zu bytes to process page data",
-              count * (sizeof(*mfns) + sizeof(*map_errs)));
+              count * sizeof(*map_errs));
         goto err;
     }
 
@@ -226,31 +288,19 @@ static int process_page_data(struct xc_sr_context *ctx, unsigned count,
         goto err;
     }
 
-    for ( i = 0; i < count; ++i )
-    {
-        ctx->restore.ops.set_page_type(ctx, pfns[i], types[i]);
-
-        switch ( types[i] )
-        {
-        case XEN_DOMCTL_PFINFO_NOTAB:
-
-        case XEN_DOMCTL_PFINFO_L1TAB:
-        case XEN_DOMCTL_PFINFO_L1TAB | XEN_DOMCTL_PFINFO_LPINTAB:
-
-        case XEN_DOMCTL_PFINFO_L2TAB:
-        case XEN_DOMCTL_PFINFO_L2TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+    set_page_types(ctx, count, pfns, types);
 
-        case XEN_DOMCTL_PFINFO_L3TAB:
-        case XEN_DOMCTL_PFINFO_L3TAB | XEN_DOMCTL_PFINFO_LPINTAB:
-
-        case XEN_DOMCTL_PFINFO_L4TAB:
-        case XEN_DOMCTL_PFINFO_L4TAB | XEN_DOMCTL_PFINFO_LPINTAB:
-
-            mfns[nr_pages++] = ctx->restore.ops.pfn_to_gfn(ctx, pfns[i]);
-            break;
-        }
+    rc = filter_pages(ctx, count, pfns, types, &nr_pages, &mfns);
+    if ( rc )
+    {
+        ERROR("Failed to filter mfns for batch of %u pages", count);
+        goto err;
     }
 
+    /* Map physically-backed pfns ('bpfns') to their gmfns. */
+    for ( i = 0; i < nr_pages; ++i )
+        mfns[i] = ctx->restore.ops.pfn_to_gfn(ctx, mfns[i]);
+
     /* Nothing to do? */
     if ( nr_pages == 0 )
         goto done;
-- 
2.7.4

