[Xen-devel] [PATCH v6 1/3] libxc: introduce XC_SAVE_ID_TOOLSTACK
Introduce a new save_id to save/restore toolstack specific extra
information.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 tools/libxc/xc_domain_restore.c |   46 ++++++++++++++++++++++++++++++++++++++-
 tools/libxc/xc_domain_save.c    |   17 ++++++++++++++
 tools/libxc/xenguest.h          |   23 ++++++++++++++++++-
 tools/libxc/xg_save_restore.h   |    1 +
 tools/libxl/libxl_dom.c         |    2 +-
 tools/xcutils/xc_restore.c      |    2 +-
 6 files changed, 87 insertions(+), 4 deletions(-)

diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
index 3e4d518..90408e6 100644
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -659,6 +659,11 @@ static void tailbuf_free(tailbuf_t *buf)
         tailbuf_free_pv(&buf->u.pv);
 }
 
+struct toolstack_data_t {
+    uint8_t *data;
+    uint32_t len;
+};
+
 typedef struct {
     void* pages;
     /* pages is of length nr_physpages, pfn_types is of length nr_pages */
@@ -685,6 +690,8 @@ typedef struct {
     uint64_t acpi_ioport_location;
     uint64_t viridian;
     uint64_t vm_generationid_addr;
+
+    struct toolstack_data_t tdata;
 } pagebuf_t;
 
 static int pagebuf_init(pagebuf_t* buf)
@@ -695,6 +702,10 @@ static int pagebuf_init(pagebuf_t* buf)
 
 static void pagebuf_free(pagebuf_t* buf)
 {
+    if (buf->tdata.data != NULL) {
+        free(buf->tdata.data);
+        buf->tdata.data = NULL;
+    }
     if (buf->pages) {
         free(buf->pages);
         buf->pages = NULL;
@@ -863,6 +874,19 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
         }
         return pagebuf_get_one(xch, ctx, buf, fd, dom);
 
+    case XC_SAVE_ID_TOOLSTACK:
+    {
+        RDEXACT(fd, &buf->tdata.len, sizeof(buf->tdata.len));
+        buf->tdata.data = (uint8_t*) realloc(buf->tdata.data, buf->tdata.len);
+        if ( buf->tdata.data == NULL )
+        {
+            PERROR("error memory allocation");
+            return -1;
+        }
+        RDEXACT(fd, buf->tdata.data, buf->tdata.len);
+        return pagebuf_get_one(xch, ctx, buf, fd, dom);
+    }
+
     case XC_SAVE_ID_ENABLE_COMPRESSION:
         /* We cannot set compression flag directly in pagebuf structure,
          * since this pagebuf still has uncompressed pages that are yet to
@@ -1299,7 +1323,8 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
                       unsigned long *console_mfn, domid_t console_domid,
                       unsigned int hvm, unsigned int pae, int superpages,
                       int no_incr_generationid,
-                      unsigned long *vm_generationid_addr)
+                      unsigned long *vm_generationid_addr,
+                      struct restore_callbacks *callbacks)
 {
     DECLARE_DOMCTL;
     int rc = 1, frc, i, j, n, m, pae_extended_cr3 = 0, ext_vcpucontext = 0;
@@ -1347,6 +1372,7 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
 
     pagebuf_t pagebuf;
     tailbuf_t tailbuf, tmptail;
+    struct toolstack_data_t tdata, tdatatmp;
     void* vcpup;
     uint64_t console_pfn = 0;
 
@@ -1359,6 +1385,7 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
     pagebuf_init(&pagebuf);
     memset(&tailbuf, 0, sizeof(tailbuf));
     tailbuf.ishvm = hvm;
+    memset(&tdata, 0, sizeof(tdata));
 
     memset(ctx, 0, sizeof(*ctx));
 
@@ -1624,6 +1651,10 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
                 ERROR("Error, unknow acpi ioport location (%i)",
                       pagebuf.acpi_ioport_location);
         }
+        tdatatmp = tdata;
+        tdata = pagebuf.tdata;
+        pagebuf.tdata = tdatatmp;
+
         if ( ctx->last_checkpoint )
         {
             // DPRINTF("Last checkpoint, finishing\n");
@@ -2074,6 +2105,19 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
         goto out;
  finish_hvm:
+    if ( callbacks != NULL && callbacks->toolstack_restore != NULL &&
+         tdata.data != NULL )
+    {
+        if ( callbacks->toolstack_restore(dom, tdata.data, tdata.len,
+                                          callbacks->data) < 0 )
+        {
+            PERROR("error calling toolstack_restore");
+            free(tdata.data);
+            goto out;
+        }
+    }
+    free(tdata.data);
+
     /* Dump the QEMU state to a state file for QEMU to load */
     if ( dump_qemu(xch, dom, &tailbuf.u.hvm) )
     {
         PERROR("Error dumping QEMU state to file");
diff --git a/tools/libxc/xc_domain_save.c b/tools/libxc/xc_domain_save.c
index a9216dd..fcc7718 100644
--- a/tools/libxc/xc_domain_save.c
+++ b/tools/libxc/xc_domain_save.c
@@ -1723,6 +1723,23 @@ int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, uint32_t max_iter
         }
     }
 
+    if ( callbacks != NULL && callbacks->toolstack_save != NULL )
+    {
+        int id = XC_SAVE_ID_TOOLSTACK;
+        uint8_t *buf;
+        uint32_t len;
+
+        if ( callbacks->toolstack_save(dom, &buf, &len, callbacks->data) < 0 )
+        {
+            PERROR("Error calling toolstack_save");
+            goto out;
+        }
+        wrexact(io_fd, &id, sizeof(id));
+        wrexact(io_fd, &len, sizeof(len));
+        wrexact(io_fd, buf, len);
+        free(buf);
+    }
+
     if ( !callbacks->checkpoint )
     {
         /*
diff --git a/tools/libxc/xenguest.h b/tools/libxc/xenguest.h
index 8d885d3..6435f65 100644
--- a/tools/libxc/xenguest.h
+++ b/tools/libxc/xenguest.h
@@ -44,6 +44,14 @@ struct save_callbacks {
     /* Enable qemu-dm logging dirty pages to xen */
     int (*switch_qemu_logdirty)(int domid, unsigned enable, void *data); /* HVM only */
 
+    /* Save toolstack specific data
+     * @param buf the buffer with the data to be saved
+     * @param len the length of the buffer
+     * The callee allocates the buffer, the caller frees it (buffer must
+     * be free'able).
+     */
+    int (*toolstack_save)(uint32_t domid, uint8_t **buf, uint32_t *len, void *data);
+
     /* to be provided as the last argument to each callback function */
     void* data;
 };
@@ -62,6 +70,16 @@ int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, uint32_t max_iter
                    unsigned long vm_generationid_addr);
 
+/* callbacks provided by xc_domain_restore */
+struct restore_callbacks {
+    /* callback to restore toolstack specific data */
+    int (*toolstack_restore)(uint32_t domid, uint8_t *buf,
+            uint32_t size, void* data);
+
+    /* to be provided as the last argument to each callback function */
+    void* data;
+};
+
 /**
  * This function will restore a saved domain.
  *
@@ -75,6 +93,8 @@ int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, uint32_t max_iter
  * @parm superpages non-zero to allocate guest memory with superpages
  * @parm no_incr_generationid non-zero if generation id is NOT to be incremented
  * @parm vm_generationid_addr returned with the address of the generation id buffer
+ * @parm callbacks non-NULL to receive a callback to restore toolstack
+ *   specific data
  * @return 0 on success, -1 on failure
  */
 int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
@@ -83,7 +103,8 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
                       unsigned long *console_mfn, domid_t console_domid,
                       unsigned int hvm, unsigned int pae, int superpages,
                       int no_incr_generationid,
-                      unsigned long *vm_generationid_addr);
+                      unsigned long *vm_generationid_addr,
+                      struct restore_callbacks *callbacks);
 /**
  * xc_domain_restore writes a file to disk that contains the device
  * model saved state.
diff --git a/tools/libxc/xg_save_restore.h b/tools/libxc/xg_save_restore.h
index 89f3504..04e7892 100644
--- a/tools/libxc/xg_save_restore.h
+++ b/tools/libxc/xg_save_restore.h
@@ -258,6 +258,7 @@
 #define XC_SAVE_ID_HVM_PAGING_RING_PFN -15
 #define XC_SAVE_ID_HVM_ACCESS_RING_PFN -16
 #define XC_SAVE_ID_HVM_SHARING_RING_PFN -17
+#define XC_SAVE_ID_TOOLSTACK          -18 /* Optional toolstack specific info */
 
 /*
 ** We process save/restore/migrate in batches of pages; the below
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 9b33267..5c4c972 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -422,7 +422,7 @@ int libxl__domain_restore_common(libxl__gc *gc, uint32_t domid,
                                  state->store_domid, state->console_port,
                                  &state->console_mfn, state->console_domid,
                                  hvm, pae, superpages, no_incr_generationid,
-                                 &state->vm_generationid_addr);
+                                 &state->vm_generationid_addr, NULL);
     if ( rc ) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "restoring domain");
         return ERROR_FAIL;
diff --git a/tools/xcutils/xc_restore.c b/tools/xcutils/xc_restore.c
index e41a133..0235579 100644
--- a/tools/xcutils/xc_restore.c
+++ b/tools/xcutils/xc_restore.c
@@ -47,7 +47,7 @@ main(int argc, char **argv)
 
     ret = xc_domain_restore(xch, io_fd, domid, store_evtchn, &store_mfn, 0,
                             console_evtchn, &console_mfn, 0, hvm, pae, superpages,
-                            0, NULL);
+                            0, NULL, NULL);
 
     if ( ret == 0 )
     {
-- 
1.7.2.5
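
As an illustration of the new interface for anyone trying the series:
the sketch below is not part of the patch, and the function names and
string payload are invented for the example, but it shows how a
toolstack could plug into the two hooks. On the wire the record is
simply the signed 32-bit chunk id (-18) followed by a 32-bit length and
the raw blob, both written in host byte order by wrexact().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <xenctrl.h>
#include <xenguest.h>

/* Save side: fill *buf/*len with an opaque, toolstack-defined blob.
 * Per the xenguest.h comment, the callee allocates the buffer with
 * malloc() and xc_domain_save() frees it after writing the record. */
static int example_toolstack_save(uint32_t domid, uint8_t **buf,
                                  uint32_t *len, void *data)
{
    const char *blob = data;          /* hypothetical payload */

    *len = strlen(blob) + 1;
    *buf = malloc(*len);
    if ( *buf == NULL )
        return -1;
    memcpy(*buf, blob, *len);
    return 0;
}

/* Restore side: called once, at the end of a successful restore, with
 * the buffered record. The buffer is freed by libxc afterwards, so it
 * must only be parsed here; returning < 0 aborts the restore. */
static int example_toolstack_restore(uint32_t domid, uint8_t *buf,
                                     uint32_t size, void *data)
{
    fprintf(stderr, "dom%u: received %u bytes of toolstack data\n",
            domid, size);
    return 0;
}

Wiring it up is then just:

    struct save_callbacks scb = {
        .toolstack_save = example_toolstack_save,
        .data = (void *)"my-toolstack-v1",   /* hypothetical payload */
    };
    struct restore_callbacks rcb = {
        .toolstack_restore = example_toolstack_restore,
    };

with scb passed through the existing callbacks argument of
xc_domain_save() and &rcb through the new final argument of
xc_domain_restore(); passing NULL there, as libxl and xc_restore do
above, keeps the old behaviour.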
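
For completeness, here is a standalone sketch of the read side of the
record, to make the framing explicit. The helper names are
hypothetical: read_exact_fd() stands in for the RDEXACT() logic that
pagebuf_get_one() uses above, and the chunk id itself is assumed to
have already been consumed by the chunk-dispatch loop.

#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

/* Read exactly n bytes, looping over short reads. */
static int read_exact_fd(int fd, void *p, size_t n)
{
    unsigned char *b = p;

    while ( n > 0 )
    {
        ssize_t r = read(fd, b, n);
        if ( r <= 0 )
            return -1;
        b += r;
        n -= (size_t)r;
    }
    return 0;
}

/* Consume one XC_SAVE_ID_TOOLSTACK payload: a host-endian uint32_t
 * length followed by that many opaque bytes. */
static int read_toolstack_record(int fd, uint8_t **buf, uint32_t *len)
{
    if ( read_exact_fd(fd, len, sizeof(*len)) )
        return -1;
    *buf = malloc(*len ? *len : 1);   /* tolerate a zero-length blob */
    if ( *buf == NULL )
        return -1;
    if ( read_exact_fd(fd, *buf, *len) )
    {
        free(*buf);
        *buf = NULL;
        return -1;
    }
    return 0;
}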