Re: [Xen-devel] [PATCH v5 RFC 14/14] tools/libxc: noarch restore code
On 06/12/2014 02:14 AM, Andrew Cooper wrote:

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Frediano Ziglio <frediano.ziglio@xxxxxxxxxx>
Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
---
 tools/libxc/saverestore/common.h  |    6 +
 tools/libxc/saverestore/restore.c |  556 ++++++++++++++++++++++++++++++++++++-
 2 files changed, 561 insertions(+), 1 deletion(-)

diff --git a/tools/libxc/saverestore/common.h b/tools/libxc/saverestore/common.h
index e16e0de..2d44961 100644
--- a/tools/libxc/saverestore/common.h
+++ b/tools/libxc/saverestore/common.h
@@ -292,6 +292,12 @@ static inline int write_record(struct context *ctx, struct record *rec)
     return write_split_record(ctx, rec, NULL, 0);
 }

...snip...

+/*
+ * Given a list of pfns, their types, and a block of page data from the
+ * stream, populate and record their types, map the relevant subset and copy
+ * the data into the guest.
+ */
+static int process_page_data(struct context *ctx, unsigned count,
+                             xen_pfn_t *pfns, uint32_t *types, void *page_data)
+{
+    xc_interface *xch = ctx->xch;
+    xen_pfn_t *mfns = malloc(count * sizeof(*mfns));
+    int *map_errs = malloc(count * sizeof(*map_errs));
+    int rc = -1;
+    void *mapping = NULL, *guest_page = NULL;
+    unsigned i,    /* i indexes the pfns from the record. */
+        j,         /* j indexes the subset of pfns we decide to map. */
+        nr_pages;
+
+    if ( !mfns || !map_errs )
+    {
+        ERROR("Failed to allocate %zu bytes to process page data",
+              count * (sizeof(*mfns) + sizeof(*map_errs)));
+        goto err;
+    }
+
+    rc = populate_pfns(ctx, count, pfns, types);
+    if ( rc )
+    {
+        ERROR("Failed to populate pfns for batch of %u pages", count);
+        goto err;
+    }
+    rc = -1;
+
+    for ( i = 0, nr_pages = 0; i < count; ++i )
+    {
+        ctx->ops.set_page_type(ctx, pfns[i], types[i]);
+
+        switch ( types[i] )
+        {
+        case XEN_DOMCTL_PFINFO_NOTAB:
+
+        case XEN_DOMCTL_PFINFO_L1TAB:
+        case XEN_DOMCTL_PFINFO_L1TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+
+        case XEN_DOMCTL_PFINFO_L2TAB:
+        case XEN_DOMCTL_PFINFO_L2TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+
+        case XEN_DOMCTL_PFINFO_L3TAB:
+        case XEN_DOMCTL_PFINFO_L3TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+
+        case XEN_DOMCTL_PFINFO_L4TAB:
+        case XEN_DOMCTL_PFINFO_L4TAB | XEN_DOMCTL_PFINFO_LPINTAB:
+
+            mfns[nr_pages++] = ctx->ops.pfn_to_gfn(ctx, pfns[i]);
+            break;
+        }
+    }
+
+    if ( nr_pages > 0 )
+    {
+        mapping = guest_page = xc_map_foreign_bulk(
+            xch, ctx->domid, PROT_READ | PROT_WRITE,
+            mfns, map_errs, nr_pages);
+        if ( !mapping )
+        {
+            PERROR("Unable to map %u mfns for %u pages of data",
+                   nr_pages, count);
+            goto err;
+        }
+    }
+
+    for ( i = 0, j = 0; i < count; ++i )
+    {
+        switch ( types[i] )
+        {
+        case XEN_DOMCTL_PFINFO_XTAB:
+        case XEN_DOMCTL_PFINFO_BROKEN:
+        case XEN_DOMCTL_PFINFO_XALLOC:
+            /* No page data to deal with. */
+            continue;
+        }
+
+        if ( map_errs[j] )
+        {
+            ERROR("Mapping pfn %lx (mfn %lx, type %#"PRIx32") failed with %d",
+                  pfns[i], mfns[j], types[i], map_errs[j]);

A "rc = -1;" is missing here. rc can still be 0 at this point, because
the rc = ctx->restore.ops.localise_page(ctx, types[i], guest_page);
call further down this loop sets rc to 0 on success, so a mapping error
detected on a later iteration would take this goto err path and make
process_page_data() wrongly return success.
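Something along these lines would close the hole (a sketch of the fix
being suggested; only the rc assignment is new, the rest is quoted from
the hunk above):

    if ( map_errs[j] )
    {
        rc = -1;  /* Re-arm the error code: a successful localise_page()
                   * on a previous iteration may have left rc == 0. */
        ERROR("Mapping pfn %lx (mfn %lx, type %#"PRIx32") failed with %d",
              pfns[i], mfns[j], types[i], map_errs[j]);
        goto err;
    }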
+            goto err;
+        }
+
+        memcpy(guest_page, page_data, PAGE_SIZE);
+
+        /* Undo page normalisation done by the saver. */
+        rc = ctx->restore.ops.localise_page(ctx, types[i], guest_page);
+        if ( rc )
+        {
+            DPRINTF("Failed to localise");
+            goto err;
+        }
+
+        ++j;
+        guest_page += PAGE_SIZE;
+        page_data += PAGE_SIZE;
+    }
+
+    rc = 0;
+
+ err:
+    if ( mapping )
+        munmap(mapping, nr_pages * PAGE_SIZE);
+
+    free(map_errs);
+    free(mfns);
+
+    return rc;
+}
+
+/*
+ * Validate a PAGE_DATA record from the stream, and pass the results to
+ * process_page_data() to actually perform the legwork.
+ */
+static int handle_page_data(struct context *ctx, struct record *rec)
+{
+    xc_interface *xch = ctx->xch;
+    struct rec_page_data_header *pages = rec->data;
+    unsigned i, pages_of_data = 0;
+    int rc = -1;
+
+    xen_pfn_t *pfns = NULL, pfn;
+    uint32_t *types = NULL, type;
+
+    if ( rec->length < sizeof(*pages) )
+    {
+        ERROR("PAGE_DATA record truncated: length %"PRIu32", min %zu",
+              rec->length, sizeof(*pages));
+        goto err;
+    }
+    else if ( pages->count < 1 )
+    {
+        ERROR("Expected at least 1 pfn in PAGE_DATA record");
+        goto err;
+    }
+    else if ( rec->length < sizeof(*pages) + (pages->count * sizeof(uint64_t)) )
+    {
+        ERROR("PAGE_DATA record (length %"PRIu32") too short to contain %"
+              PRIu32" pfns worth of information", rec->length, pages->count);
+        goto err;
+    }
+
+    pfns = malloc(pages->count * sizeof(*pfns));
+    types = malloc(pages->count * sizeof(*types));
+    if ( !pfns || !types )
+    {
+        ERROR("Unable to allocate enough memory for %"PRIu32" pfns",
+              pages->count);
+        goto err;
+    }
+
+    for ( i = 0; i < pages->count; ++i )
+    {
+        pfn = pages->pfn[i] & PAGE_DATA_PFN_MASK;
+        if ( !ctx->ops.pfn_is_valid(ctx, pfn) )
+        {
+            ERROR("pfn %#lx (index %u) outside domain maximum", pfn, i);
+            goto err;
+        }
+
+        type = (pages->pfn[i] & PAGE_DATA_TYPE_MASK) >> 32;
+        if ( ((type >> XEN_DOMCTL_PFINFO_LTAB_SHIFT) >= 5) &&
+             ((type >> XEN_DOMCTL_PFINFO_LTAB_SHIFT) <= 8) )
+        {
+            ERROR("Invalid type %#"PRIx32" for pfn %#lx (index %u)",
+                  type, pfn, i);
+            goto err;
+        }
+        else if ( type < XEN_DOMCTL_PFINFO_BROKEN )
+            /* NOTAB and all L1 thru L4 tables (including pinned) should have
+             * a page worth of data in the record. */
+            pages_of_data++;
+
+        pfns[i] = pfn;
+        types[i] = type;
+    }
+
+    if ( rec->length != (sizeof(*pages) +
+                         (sizeof(uint64_t) * pages->count) +
+                         (PAGE_SIZE * pages_of_data)) )
+    {
+        ERROR("PAGE_DATA record wrong size: length %"PRIu32", expected "
+              "%zu + %zu + %zu", rec->length, sizeof(*pages),
+              (sizeof(uint64_t) * pages->count), (PAGE_SIZE * pages_of_data));
+        goto err;
+    }
+
+    rc = process_page_data(ctx, pages->count, pfns, types,
+                           &pages->pfn[pages->count]);
+ err:
+    free(types);
+    free(pfns);
+
+    return rc;
+}
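To make the size arithmetic above easier to follow, the PAGE_DATA record
body being validated looks roughly like this (a sketch inferred from the
checks in handle_page_data(); any header field beyond 'count' and 'pfn'
is an assumption on my side):

    /* Record body: header, then 'count' packed 64-bit words, then one
     * page of data for each entry whose type says it carries data. */
    struct rec_page_data_header {
        uint32_t count;     /* number of pfn entries                    */
        uint32_t _res1;     /* assumed reserved/padding field           */
        uint64_t pfn[0];    /* low bits: pfn (PAGE_DATA_PFN_MASK);      */
                            /* bits 32 and up: XEN_DOMCTL_PFINFO_* type */
                            /* (PAGE_DATA_TYPE_MASK, extracted >> 32)   */
    };
    /* The page contents start at &pages->pfn[pages->count], which is
     * exactly the pointer handed to process_page_data(). */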
+
+/*
+ * Restore a domain.
+ */
+static int restore(struct context *ctx)
+{
+    xc_interface *xch = ctx->xch;
+    struct record rec;
+    int rc, saved_rc = 0, saved_errno = 0;
+
+    IPRINTF("Restoring domain");
+
+    rc = ctx->restore.ops.setup(ctx);
+    if ( rc )
+        goto err;
+
+    do
+    {
+        rc = read_record(ctx, &rec);
+        if ( rc )
+            goto err;
+
+        switch ( rec.type )
+        {
+        case REC_TYPE_END:
+            DPRINTF("End record");
+            break;
+
+        case REC_TYPE_PAGE_DATA:
+            rc = handle_page_data(ctx, &rec);
+            break;
+
+        default:
+            rc = ctx->restore.ops.process_record(ctx, &rec);
+            break;
+        }
+
+        free(rec.data);
+        if ( rc )
+            goto err;
+
+    } while ( rec.type != REC_TYPE_END );
+
+    rc = ctx->restore.ops.stream_complete(ctx);
+    if ( rc )
+        goto err;
+
+    IPRINTF("Restore successful");
+    goto done;
+
+ err:
+    saved_errno = errno;
+    saved_rc = rc;
+    PERROR("Restore failed");
+
+ done:
+    free(ctx->restore.populated_pfns);
+    rc = ctx->restore.ops.cleanup(ctx);
+    if ( rc )
+        PERROR("Failed to clean up");
+
+    if ( saved_rc )
+    {
+        rc = saved_rc;
+        errno = saved_errno;
+    }
+
+    return rc;
+}
+
 int xc_domain_restore2(xc_interface *xch, int io_fd, uint32_t dom,
                        unsigned int store_evtchn, unsigned long *store_mfn,
                        domid_t store_domid, unsigned int console_evtchn,
@@ -8,8 +502,68 @@ int xc_domain_restore2(xc_interface *xch, int io_fd, uint32_t dom,
                        int checkpointed_stream, struct restore_callbacks *callbacks)
 {
+    struct context ctx =
+        {
+            .xch = xch,
+            .fd = io_fd,
+        };
+
+    /* GCC 4.4 (of CentOS 6.x vintage) can't initialise anonymous unions :( */
+    ctx.restore.console_evtchn = console_evtchn;
+    ctx.restore.console_domid = console_domid;
+    ctx.restore.xenstore_evtchn = store_evtchn;
+    ctx.restore.xenstore_domid = store_domid;
+    ctx.restore.callbacks = callbacks;
+
     IPRINTF("In experimental %s", __func__);
-    return -1;
+
+    if ( xc_domain_getinfo(xch, dom, 1, &ctx.dominfo) != 1 )
+    {
+        PERROR("Failed to get domain info");
+        return -1;
+    }
+
+    if ( ctx.dominfo.domid != dom )
+    {
+        ERROR("Domain %d does not exist", dom);
+        return -1;
+    }
+
+    ctx.domid = dom;
+    IPRINTF("Restoring domain %d", dom);
+
+    if ( read_headers(&ctx) )
+        return -1;
+
+    if ( ctx.dominfo.hvm )
+    {
+        ctx.ops = common_ops_x86_hvm;
+        ctx.restore.ops = restore_ops_x86_hvm;
+        if ( restore(&ctx) )
+            return -1;
+    }
+    else
+    {
+        ctx.ops = common_ops_x86_pv;
+        ctx.restore.ops = restore_ops_x86_pv;
+        if ( restore(&ctx) )
+            return -1;
+    }
+
+    DPRINTF("XenStore: mfn %#lx, dom %d, evt %u",
+            ctx.restore.xenstore_mfn,
+            ctx.restore.xenstore_domid,
+            ctx.restore.xenstore_evtchn);
+
+    DPRINTF("Console: mfn %#lx, dom %d, evt %u",
+            ctx.restore.console_mfn,
+            ctx.restore.console_domid,
+            ctx.restore.console_evtchn);
+
+    *console_mfn = ctx.restore.console_mfn;
+    *store_mfn = ctx.restore.xenstore_mfn;
+
+    return 0;
 }
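One aside on the "GCC 4.4 can't initialise anonymous unions" comment: as
I understand it, the limitation is specifically designated initialisers
that reach into a member of an anonymous union, which is why the
ctx.restore.* fields are assigned one by one after the initialiser. A
minimal illustration (hypothetical layout, not the patch's real struct
context):

    struct ctx {
        int fd;
        union {                          /* anonymous union */
            struct { int evtchn; } restore;
            struct { int batch;  } save;
        };
    };

    struct ctx a = { .fd = 3 };          /* fine on GCC 4.4            */
    a.restore.evtchn = 7;                /* fine: plain assignment     */

    /* Rejected by GCC 4.4, accepted by newer compilers:
     * struct ctx b = { .fd = 3, .restore = { .evtchn = 7 } };
     */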
--
Thanks,
Yang.