
Re: [Xen-devel] [PATCH v4 7/9] tools/libxc: x86 pv restore implementation



On Wed, 2014-04-30 at 19:36 +0100, Andrew Cooper wrote:
> +    ihdr.id      = ntohl(ihdr.id);
> +    ihdr.version = ntohl(ihdr.version);
> +    ihdr.options = ntohs(ihdr.options);
> +
> +    if ( ihdr.marker != IHDR_MARKER )
> +    {
> +        ERROR("Invalid marker: Got 0x%016"PRIx64, ihdr.marker);
> +        return -1;
> +    }
> +    else if ( ihdr.id != IHDR_ID )

Can we do away with the unnecessary elses please.
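I.e. since each branch returns, the checks can stand alone.  Roughly
(just a sketch; the second error message is only illustrative):

    if ( ihdr.marker != IHDR_MARKER )
    {
        ERROR("Invalid marker: Got 0x%016"PRIx64, ihdr.marker);
        return -1;
    }

    if ( ihdr.id != IHDR_ID )
    {
        ERROR("Invalid ID: Got 0x%08"PRIx32", expected 0x%08"PRIx32,
              ihdr.id, IHDR_ID);
        return -1;
    }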


> +    if ( xc_domain_getinfo(xch, dom, 1, &ctx.dominfo) != 1 )
> +    {
> +        PERROR("Failed to get domain info");
> +        return -1;
> +    }
> +
> +    if ( ctx.dominfo.domid != dom )
> +    {
> +        ERROR("Domain %d does not exist", dom);
> +        return -1;
> +    }
> +
> +    ctx.domid = dom;
> +    IPRINTF("Restoring domain %d", dom);
> +
> +    if ( read_headers(&ctx) )
> +        return -1;
> +
> +    if ( ctx.dominfo.hvm )
> +    {
> +        ERROR("HVM Restore not supported yet");
> +        return -1;
> +    }
> +    else

Unneeded.

> +
> +static int handle_x86_pv_vcpu_basic(struct context *ctx, struct record *rec)
> +{
> +    xc_interface *xch = ctx->xch;
> +    struct rec_x86_pv_vcpu *vhdr = rec->data;
> +    vcpu_guest_context_any_t vcpu;
> +    size_t vcpusz = ctx->x86_pv.width == 8 ? sizeof vcpu.x64 : sizeof vcpu.x32;
> +    xen_pfn_t pfn, mfn;
> +    unsigned long tmp;
> +    unsigned i;
> +    int rc = -1;
> +
> +    if ( rec->length <= sizeof *vhdr )
> +    {
> +        ERROR("X86_PV_VCPU_BASIC record trucated: length %"PRIu32", min %zu",

"truncated"

> +        else if ( (ctx->x86_pv.pfn_types[pfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
> +                  (((xen_pfn_t)ctx->x86_pv.levels) << XEN_DOMCTL_PFINFO_LTAB_SHIFT) )

I'd think some temporaries would help with clarity here.
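E.g. (the temporary names are only illustrative):

    xen_pfn_t type = ctx->x86_pv.pfn_types[pfn] &
                     XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
    xen_pfn_t exp  = ((xen_pfn_t)ctx->x86_pv.levels) <<
                     XEN_DOMCTL_PFINFO_LTAB_SHIFT;

    if ( type != exp )
        ...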

> +static int handle_x86_pv_vcpu_extended(struct context *ctx, struct record *rec)
> +{
> +    xc_interface *xch = ctx->xch;
> +    struct rec_x86_pv_vcpu *vcpu = rec->data;
> +    DECLARE_DOMCTL;
> +
> +    if ( rec->length <= sizeof *vcpu )
> +    {
> +        ERROR("X86_PV_VCPU_EXTENDED record trucated: length %"PRIu32", min %zu",
> +              rec->length, sizeof *vcpu + 1);

I've just realised that there was an X86_PV_VCPU_EXTENDED in
handle_x86_pv_vcpu_basic which was wrong, but since I've already
trimmed that hunk I'll comment here instead.


> +        return -1;
> +    }
> +    else if ( rec->length > sizeof *vcpu + 128 )

There are an awful lot of magic numbers throughout this series. Can we
try to use meaningful symbolic names for things, please?
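E.g. for the 128 above, something along these lines (the name is
invented, use whatever the spec calls it):

    /* Maximum size of the blob following the extended vcpu header. */
    #define X86_PV_VCPU_EXTENDED_MAX 128

    if ( rec->length > sizeof *vcpu + X86_PV_VCPU_EXTENDED_MAX )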

> +int restore_x86_pv(struct context *ctx)
> +{
> +    xc_interface *xch = ctx->xch;
> +    struct record rec;
> +    int rc;
> +
> +    IPRINTF("In experimental %s", __func__);
> +
> +    if ( ctx->restore.guest_type != DHDR_TYPE_x86_pv )
> +    {
> +        ERROR("Unable to restore %s domain into an x86_pv domain",
> +              dhdr_type_to_str(ctx->restore.guest_type));
> +        return -1;
> +    }
> +    else if ( ctx->restore.guest_page_size != 4096 )

#defines exist for this.
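E.g. (assuming the usual libxc PAGE_SIZE / XC_PAGE_SIZE definitions are
available here):

    if ( ctx->restore.guest_page_size != PAGE_SIZE )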

> +    /* all done */
> +    IPRINTF("All Done");
> +    assert(!rc);
> +    goto cleanup;
> +
> + err:
> +    assert(rc);

FWIW I think you could drop this and let the success case fall through,
without the hop and the skip.
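Roughly (just a sketch; error paths would then "goto cleanup" having
already set rc to something non-zero):

    /* all done */
    IPRINTF("All Done");
    assert(!rc);

 cleanup:
    free(ctx->x86_pv.p2m);
    ...
    return rc;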

> + cleanup:
> +
> +    free(ctx->x86_pv.p2m);
> +    free(ctx->x86_pv.p2m_pfns);
> +    free(ctx->x86_pv.pfn_types);
> +    free(ctx->restore.populated_pfns);
> +
> +    if ( ctx->x86_pv.m2p )
> +        munmap(ctx->x86_pv.m2p, ctx->x86_pv.nr_m2p_frames * PAGE_SIZE);
> +
> +    return rc;
> +}
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * tab-width: 4
> + * indent-tabs-mode: nil
> + * End:
> + */


