[Xen-devel] [PATCH RFC XEN v1 13/14] tools: libxc: wire up migration for ARM
This seems almost too easy. It's possible there is some scope for sharing
more with the x86/HVM side.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: andyhhp
---
 config/arm32.mk                 |   1 +
 config/arm64.mk                 |   1 +
 tools/libxc/Makefile            |   3 +
 tools/libxc/xc_sr_common.h      |  19 ++++
 tools/libxc/xc_sr_common_arm.c  |  22 ++++
 tools/libxc/xc_sr_common_arm.h  |  15 +++
 tools/libxc/xc_sr_restore_arm.c | 225 ++++++++++++++++++++++++++++++++++++++++
 tools/libxc/xc_sr_save_arm.c    | 183 ++++++++++++++++++++++++++++++++
 8 files changed, 469 insertions(+)
 create mode 100644 tools/libxc/xc_sr_common_arm.c
 create mode 100644 tools/libxc/xc_sr_common_arm.h
 create mode 100644 tools/libxc/xc_sr_restore_arm.c
 create mode 100644 tools/libxc/xc_sr_save_arm.c

diff --git a/config/arm32.mk b/config/arm32.mk
index cd97e42..5389429 100644
--- a/config/arm32.mk
+++ b/config/arm32.mk
@@ -1,6 +1,7 @@
 CONFIG_ARM := y
 CONFIG_ARM_32 := y
 CONFIG_ARM_$(XEN_OS) := y
+CONFIG_MIGRATE := y

 CONFIG_XEN_INSTALL_SUFFIX :=
diff --git a/config/arm64.mk b/config/arm64.mk
index c5deb4e..6d38ef9 100644
--- a/config/arm64.mk
+++ b/config/arm64.mk
@@ -1,6 +1,7 @@
 CONFIG_ARM := y
 CONFIG_ARM_64 := y
 CONFIG_ARM_$(XEN_OS) := y
+CONFIG_MIGRATE := y

 CONFIG_XEN_INSTALL_SUFFIX :=
diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index cd52d77..d43a060 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -61,6 +61,9 @@ GUEST_SRCS-$(CONFIG_X86) += xc_sr_restore_x86_pv.c
 GUEST_SRCS-$(CONFIG_X86) += xc_sr_restore_x86_hvm.c
 GUEST_SRCS-$(CONFIG_X86) += xc_sr_save_x86_pv.c
 GUEST_SRCS-$(CONFIG_X86) += xc_sr_save_x86_hvm.c
+GUEST_SRCS-$(CONFIG_ARM) += xc_sr_common_arm.c
+GUEST_SRCS-$(CONFIG_ARM) += xc_sr_save_arm.c
+GUEST_SRCS-$(CONFIG_ARM) += xc_sr_restore_arm.c
 GUEST_SRCS-y += xc_sr_restore.c
 GUEST_SRCS-y += xc_sr_save.c
 GUEST_SRCS-$(CONFIG_X86) += xc_offline_page.c xc_compression.c
diff --git a/tools/libxc/xc_sr_common.h b/tools/libxc/xc_sr_common.h
index 0d36c8d..9cc8be3 100644
--- a/tools/libxc/xc_sr_common.h
+++ b/tools/libxc/xc_sr_common.h
@@ -315,6 +315,25 @@ struct xc_sr_context
                 } restore;
             };
         } x86_hvm;
+
+        struct /* ARM guest. */
+        {
+            union
+            {
+                struct
+                {
+                } save;
+
+                struct
+                {
+                    /* HVM context blob. */
+                    void *context;
+                    size_t contextsz;
+
+                } restore;
+            };
+        } arm;
+
     };
 };
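The new arm member slots into the same per-architecture union that already
holds x86_pv and x86_hvm, so only one architecture's state occupies the
context at a time. Below is a minimal standalone sketch of that layout and
of how the restore blob fields get used; the demo_* names are invented for
illustration, and only context/contextsz come from the patch itself:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Trimmed-down analogue of struct xc_sr_context's guest-arch union.
 * Only one arch's state is live at a time, so a union keeps the
 * context object small.  demo_* names are invented for this sketch. */
struct demo_ctx {
    union {
        struct {
            struct { int unused; } save;   /* nothing needed yet */
            struct {
                void *context;             /* HVM context blob */
                size_t contextsz;
            } restore;
        } arm;
        /* struct { ... } x86_pv; and struct { ... } x86_hvm;
         * would sit alongside here. */
    };
};

int main(void)
{
    struct demo_ctx ctx = { 0 };
    const unsigned char blob[] = { 0xde, 0xad, 0xbe, 0xef };

    /* What handle_hvm_context() below does: stash a private copy of
     * the record payload until stream_complete() hands it to Xen. */
    ctx.arm.restore.context = malloc(sizeof(blob));
    if ( !ctx.arm.restore.context )
        return 1;
    memcpy(ctx.arm.restore.context, blob, sizeof(blob));
    ctx.arm.restore.contextsz = sizeof(blob);

    printf("stashed %zu bytes of HVM context\n", ctx.arm.restore.contextsz);
    free(ctx.arm.restore.context);
    return 0;
}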
diff --git a/tools/libxc/xc_sr_common_arm.c b/tools/libxc/xc_sr_common_arm.c
new file mode 100644
index 0000000..d157bfe
--- /dev/null
+++ b/tools/libxc/xc_sr_common_arm.c
@@ -0,0 +1,22 @@
+#include "xc_sr_common_arm.h"
+
+void xc_sr_select_save_ops(struct xc_sr_context *ctx)
+{
+    ctx->save.guest_type = DHDR_TYPE_ARM;
+    ctx->save.ops = save_ops_arm;
+}
+
+void xc_sr_select_restore_ops(struct xc_sr_context *ctx)
+{
+    ctx->restore.ops = restore_ops_arm;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libxc/xc_sr_common_arm.h b/tools/libxc/xc_sr_common_arm.h
new file mode 100644
index 0000000..efbea70
--- /dev/null
+++ b/tools/libxc/xc_sr_common_arm.h
@@ -0,0 +1,15 @@
+#ifndef __COMMON_ARM__H
+#define __COMMON_ARM__H
+
+#include "xc_sr_common.h"
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
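These two selectors are the whole of the arch dispatch: the arch-neutral
save/restore code picks an ops table through them and never names an
architecture, and each build links exactly one provider of the hooks (the
CONFIG_X86/CONFIG_ARM lines in the Makefile hunk above). A compilable
sketch of that shape, using simplified demo_* stand-ins rather than
libxc's real types:

#include <stdio.h>

struct demo_ctx;

/* Cut-down analogue of struct xc_sr_save_ops: the arch-neutral loop
 * only ever calls through these pointers. */
struct demo_save_ops {
    int (*setup)(struct demo_ctx *ctx);
    int (*end_of_checkpoint)(struct demo_ctx *ctx);
};

struct demo_ctx {
    struct demo_save_ops ops;
};

static int demo_arm_setup(struct demo_ctx *ctx)
{
    return 0;                 /* nothing to prepare on ARM */
}

static int demo_arm_end_of_checkpoint(struct demo_ctx *ctx)
{
    puts("would write HVM_CONTEXT and HVM_PARAMS records here");
    return 0;
}

static const struct demo_save_ops demo_save_ops_arm = {
    .setup = demo_arm_setup,
    .end_of_checkpoint = demo_arm_end_of_checkpoint,
};

/* Analogue of xc_sr_select_save_ops(): exactly one definition is
 * linked into any given build. */
static void demo_select_save_ops(struct demo_ctx *ctx)
{
    ctx->ops = demo_save_ops_arm;
}

/* The arch-neutral caller never mentions ARM or x86 by name. */
int main(void)
{
    struct demo_ctx ctx;

    demo_select_save_ops(&ctx);
    return ctx.ops.setup(&ctx) || ctx.ops.end_of_checkpoint(&ctx);
}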
diff --git a/tools/libxc/xc_sr_restore_arm.c b/tools/libxc/xc_sr_restore_arm.c
new file mode 100644
index 0000000..d62739e
--- /dev/null
+++ b/tools/libxc/xc_sr_restore_arm.c
@@ -0,0 +1,225 @@
+#include <assert.h>
+#include <arpa/inet.h>
+
+#include "xc_sr_common_arm.h"
+
+/*
+ * Process an HVM_CONTEXT record from the stream.
+ */
+static int handle_hvm_context(struct xc_sr_context *ctx,
+                              struct xc_sr_record *rec)
+{
+    xc_interface *xch = ctx->xch;
+    void *p;
+
+    p = malloc(rec->length);
+    if ( !p )
+    {
+        ERROR("Unable to allocate %u bytes for hvm context", rec->length);
+        return -1;
+    }
+
+    free(ctx->arm.restore.context);
+
+    ctx->arm.restore.context = memcpy(p, rec->data, rec->length);
+    ctx->arm.restore.contextsz = rec->length;
+
+    return 0;
+}
+
+/*
+ * Process an HVM_PARAMS record from the stream.
+ */
+static int handle_hvm_params(struct xc_sr_context *ctx,
+                             struct xc_sr_record *rec)
+{
+    xc_interface *xch = ctx->xch;
+    struct xc_sr_rec_hvm_params *hdr = rec->data;
+    struct xc_sr_rec_hvm_params_entry *entry = hdr->param;
+    unsigned int i;
+    int rc;
+
+    if ( rec->length < sizeof(*hdr)
+         || rec->length < sizeof(*hdr) + hdr->count * sizeof(*entry) )
+    {
+        ERROR("hvm_params record is too short");
+        return -1;
+    }
+
+    for ( i = 0; i < hdr->count; i++, entry++ )
+    {
+        switch ( entry->index )
+        {
+        case HVM_PARAM_CONSOLE_PFN:
+            ctx->restore.console_gfn = entry->value;
+            xc_clear_domain_page(xch, ctx->domid, entry->value);
+            break;
+        case HVM_PARAM_STORE_PFN:
+            ctx->restore.xenstore_gfn = entry->value;
+            xc_clear_domain_page(xch, ctx->domid, entry->value);
+            break;
+        }
+
+        rc = xc_hvm_param_set(xch, ctx->domid, entry->index, entry->value);
+        if ( rc < 0 )
+        {
+            PERROR("set HVM param %"PRId64" = 0x%016"PRIx64,
+                   entry->index, entry->value);
+            return rc;
+        }
+    }
+    return 0;
+}
+
+/* restore_ops function. */
+static bool arm_pfn_is_valid(const struct xc_sr_context *ctx, xen_pfn_t pfn)
+{
+    return true;
+}
+
+/* restore_ops function. */
+static xen_pfn_t arm_pfn_to_gfn(const struct xc_sr_context *ctx,
+                                xen_pfn_t pfn)
+{
+    return pfn;
+}
+
+/* restore_ops function. */
+static void arm_set_gfn(struct xc_sr_context *ctx, xen_pfn_t pfn,
+                        xen_pfn_t gfn)
+{
+    /* no-op */
+}
+
+/* restore_ops function. */
+static void arm_set_page_type(struct xc_sr_context *ctx,
+                              xen_pfn_t pfn, xen_pfn_t type)
+{
+    /* no-op */
+}
+
+/* restore_ops function. */
+static int arm_localise_page(struct xc_sr_context *ctx,
+                             uint32_t type, void *page)
+{
+    /* no-op */
+    return 0;
+}
+
+/*
+ * restore_ops function. Confirms the stream matches the domain.
+ */
+static int arm_setup(struct xc_sr_context *ctx)
+{
+    xc_interface *xch = ctx->xch;
+
+    if ( ctx->restore.guest_type != DHDR_TYPE_ARM )
+    {
+        ERROR("Unable to restore %s domain into an arm domain",
+              dhdr_type_to_str(ctx->restore.guest_type));
+        return -1;
+    }
+    else if ( ctx->restore.guest_page_size != PAGE_SIZE )
+    {
+        ERROR("Invalid page size %u for arm domains",
+              ctx->restore.guest_page_size);
+        return -1;
+    }
+
+    return 0;
+}
+
+/*
+ * restore_ops function.
+ */
+static int arm_process_record(struct xc_sr_context *ctx,
+                              struct xc_sr_record *rec)
+{
+    switch ( rec->type )
+    {
+    case REC_TYPE_HVM_CONTEXT:
+        return handle_hvm_context(ctx, rec);
+
+    case REC_TYPE_HVM_PARAMS:
+        return handle_hvm_params(ctx, rec);
+
+    default:
+        return RECORD_NOT_PROCESSED;
+    }
+}
+
+/*
+ * restore_ops function. Sets extra hvm parameters and seeds the grant table.
+ */
+static int arm_stream_complete(struct xc_sr_context *ctx)
+{
+    xc_interface *xch = ctx->xch;
+    int rc;
+
+    rc = xc_hvm_param_set(xch, ctx->domid, HVM_PARAM_STORE_EVTCHN,
+                          ctx->restore.xenstore_evtchn);
+    if ( rc )
+    {
+        PERROR("Failed to set HVM_PARAM_STORE_EVTCHN");
+        return rc;
+    }
+
+    rc = xc_hvm_param_set(xch, ctx->domid, HVM_PARAM_CONSOLE_EVTCHN,
+                          ctx->restore.console_evtchn);
+    if ( rc )
+    {
+        PERROR("Failed to set HVM_PARAM_CONSOLE_EVTCHN");
+        return rc;
+    }
+
+    rc = xc_domain_hvm_setcontext(xch, ctx->domid,
+                                  ctx->arm.restore.context,
+                                  ctx->arm.restore.contextsz);
+    if ( rc < 0 )
+    {
+        PERROR("Unable to restore HVM context");
+        return rc;
+    }
+
+    rc = xc_dom_gnttab_hvm_seed(xch, ctx->domid,
+                                ctx->restore.console_gfn,
+                                ctx->restore.xenstore_gfn,
+                                ctx->restore.console_domid,
+                                ctx->restore.xenstore_domid);
+    if ( rc )
+    {
+        PERROR("Failed to seed grant table");
+        return rc;
+    }
+
+    return rc;
+}
+
+static int arm_cleanup(struct xc_sr_context *ctx)
+{
+    /* no-op */
+    return 0;
+}
+
+struct xc_sr_restore_ops restore_ops_arm =
+{
+    .pfn_is_valid = arm_pfn_is_valid,
+    .pfn_to_gfn = arm_pfn_to_gfn,
+    .set_gfn = arm_set_gfn,
+    .set_page_type = arm_set_page_type,
+    .localise_page = arm_localise_page,
+    .setup = arm_setup,
+    .process_record = arm_process_record,
+    .stream_complete = arm_stream_complete,
+    .cleanup = arm_cleanup,
+};
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
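One detail worth calling out in handle_hvm_params() above is the two-step
length check: first the fixed header must fit in the record, then the
declared entry count must fit in what remains. A standalone sketch of the
same validation follows; the struct shapes are assumptions modelled on the
field names in the patch (the canonical layouts live in libxc's stream
format header), and the explicit uint64_t cast just makes the
multiplication's freedom from 32-bit overflow obvious:

#include <stdint.h>
#include <stdio.h>

/* Assumed record shapes, modelled on the names used in the patch. */
struct demo_hvm_params_entry {
    uint64_t index;
    uint64_t value;
};

struct demo_hvm_params_hdr {
    uint32_t count;
    uint32_t padding;
};

/* Mirrors the check in handle_hvm_params(): header first, then the
 * counted array.  Skipping either step would let a truncated or
 * malformed record walk the parser off the end of the buffer. */
static int demo_params_record_ok(uint32_t length,
                                 const struct demo_hvm_params_hdr *hdr)
{
    if ( length < sizeof(*hdr) )
        return 0;
    if ( (uint64_t)length < sizeof(*hdr) +
         (uint64_t)hdr->count * sizeof(struct demo_hvm_params_entry) )
        return 0;
    return 1;
}

int main(void)
{
    struct demo_hvm_params_hdr hdr = { .count = 2 };

    /* 8-byte header + 2 * 16-byte entries = 40 bytes minimum. */
    printf("40 bytes: %s\n", demo_params_record_ok(40, &hdr) ? "ok" : "short");
    printf("39 bytes: %s\n", demo_params_record_ok(39, &hdr) ? "ok" : "short");
    return 0;
}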
diff --git a/tools/libxc/xc_sr_save_arm.c b/tools/libxc/xc_sr_save_arm.c
new file mode 100644
index 0000000..1442679
--- /dev/null
+++ b/tools/libxc/xc_sr_save_arm.c
@@ -0,0 +1,183 @@
+#include <assert.h>
+
+#include "xc_sr_common_arm.h"
+
+#include <xen/hvm/params.h>
+
+/*
+ * Query for the HVM context and write an HVM_CONTEXT record into the stream.
+ */
+static int write_hvm_context(struct xc_sr_context *ctx)
+{
+    xc_interface *xch = ctx->xch;
+    int rc, hvm_buf_size;
+    struct xc_sr_record hvm_rec =
+    {
+        .type = REC_TYPE_HVM_CONTEXT,
+    };
+
+    IPRINTF("Writing HVM context");
+    hvm_buf_size = xc_domain_hvm_getcontext(xch, ctx->domid, 0, 0);
+    if ( hvm_buf_size < 0 )
+    {
+        PERROR("Couldn't get HVM context size from Xen");
+        rc = -1;
+        goto out;
+    }
+
+    hvm_rec.data = malloc(hvm_buf_size);
+    if ( !hvm_rec.data )
+    {
+        PERROR("Couldn't allocate memory");
+        rc = -1;
+        goto out;
+    }
+
+    hvm_buf_size = xc_domain_hvm_getcontext(xch, ctx->domid,
+                                            hvm_rec.data, hvm_buf_size);
+    if ( hvm_buf_size < 0 )
+    {
+        PERROR("Couldn't get HVM context from Xen");
+        rc = -1;
+        goto out;
+    }
+
+    hvm_rec.length = hvm_buf_size;
+    rc = write_record(ctx, &hvm_rec);
+    if ( rc < 0 )
+    {
+        PERROR("error writing HVM_CONTEXT record");
+        goto out;
+    }
+
+ out:
+    free(hvm_rec.data);
+    return rc;
+}
+
+/*
+ * Query for a range of HVM parameters and write an HVM_PARAMS record into the
+ * stream.
+ */
+static int write_hvm_params(struct xc_sr_context *ctx)
+{
+    static const unsigned int params[] = {
+        HVM_PARAM_STORE_PFN,
+        HVM_PARAM_CONSOLE_PFN,
+    };
+
+    xc_interface *xch = ctx->xch;
+    struct xc_sr_rec_hvm_params_entry entries[ARRAY_SIZE(params)];
+    struct xc_sr_rec_hvm_params hdr = {
+        .count = 0,
+    };
+    struct xc_sr_record rec = {
+        .type = REC_TYPE_HVM_PARAMS,
+        .length = sizeof(hdr),
+        .data = &hdr,
+    };
+    unsigned int i;
+    int rc;
+
+    for ( i = 0; i < ARRAY_SIZE(params); i++ )
+    {
+        uint32_t index = params[i];
+        uint64_t value;
+
+        rc = xc_hvm_param_get(xch, ctx->domid, index, &value);
+        if ( rc )
+        {
+            PERROR("Failed to get HVMPARAM at index %u", index);
+            return rc;
+        }
+
+        if ( value != 0 )
+        {
+            entries[hdr.count].index = index;
+            entries[hdr.count].value = value;
+            hdr.count++;
+        }
+    }
+
+    rc = write_split_record(ctx, &rec, entries, hdr.count * sizeof(*entries));
+    if ( rc )
+        PERROR("Failed to write HVM_PARAMS record");
+
+    return rc;
+}
+
+static xen_pfn_t arm_pfn_to_gfn(const struct xc_sr_context *ctx,
+                                xen_pfn_t pfn)
+{
+    /* identity map */
+    return pfn;
+}
+
+static int arm_normalise_page(struct xc_sr_context *ctx,
+                              xen_pfn_t type, void **page)
+{
+    /* no-op */
+    return 0;
+}
+
+static int arm_setup(struct xc_sr_context *ctx)
+{
+    /* no-op */
+    return 0;
+}
+
+static int arm_start_of_stream(struct xc_sr_context *ctx)
+{
+    /* no-op */
+    return 0;
+}
+
+static int arm_start_of_checkpoint(struct xc_sr_context *ctx)
+{
+    /* no-op */
+    return 0;
+}
+
+static int arm_end_of_checkpoint(struct xc_sr_context *ctx)
+{
+    int rc;
+
+    /* Write the HVM_CONTEXT record. */
+    rc = write_hvm_context(ctx);
+    if ( rc )
+        return rc;
+
+    /* Write an HVM_PARAMS record containing the applicable HVM params. */
+    rc = write_hvm_params(ctx);
+    if ( rc )
+        return rc;
+
+    return 0;
+}
+
+static int arm_cleanup(struct xc_sr_context *ctx)
+{
+    /* no-op */
+    return 0;
+}
+
+struct xc_sr_save_ops save_ops_arm =
+{
+    .pfn_to_gfn = arm_pfn_to_gfn,
+    .normalise_page = arm_normalise_page,
+    .setup = arm_setup,
+    .start_of_stream = arm_start_of_stream,
+    .start_of_checkpoint = arm_start_of_checkpoint,
+    .end_of_checkpoint = arm_end_of_checkpoint,
+    .cleanup = arm_cleanup,
+};
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--
2.6.1
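For completeness: on the wire each of these records is a small type/length
header followed by the payload, which is what the common write_record()
and write_split_record() helpers emit. The sketch below shows the general
shape under the assumption of a 32-bit type, a 32-bit length and 8-byte
body alignment; treat that framing, the record number and the demo_*
helpers as assumptions made for illustration, since the stream
specification elsewhere in this series is authoritative:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the stream writer: hexdump the bytes
 * instead of write()ing them to the migration fd. */
static void demo_emit(const void *buf, uint32_t len)
{
    const unsigned char *p = buf;
    uint32_t i;

    for ( i = 0; i < len; i++ )
        printf("%02x ", p[i]);
}

/* Rough shape of write_split_record(): type, combined length, header
 * piece, body piece, then zero padding to an 8-byte boundary. */
static void demo_write_split_record(uint32_t type,
                                    const void *hdr, uint32_t hdrsz,
                                    const void *body, uint32_t bodysz)
{
    static const unsigned char zeroes[8];
    uint32_t length = hdrsz + bodysz;

    demo_emit(&type, sizeof(type));
    demo_emit(&length, sizeof(length));
    demo_emit(hdr, hdrsz);
    demo_emit(body, bodysz);
    demo_emit(zeroes, (8 - length % 8) % 8);
    printf("\n");
}

int main(void)
{
    /* One HVM_PARAMS-style entry: index 18, value 0xfeff0000. */
    uint32_t hdr[2] = { 1, 0 };               /* count, padding */
    uint64_t entry[2] = { 18, 0xfeff0000 };

    demo_write_split_record(0x13 /* assumed record number */,
                            hdr, sizeof(hdr), entry, sizeof(entry));
    return 0;
}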