[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v4 1/9] xen/arm: Implement hvm save and restore



On 10/04/2013 05:43 AM, Jaeyong Yoo wrote:
> Implement save/restore of hvm context hypercall. In hvm
> context save/restore, we save gic, timer and vfp registers.
> 
> Signed-off-by: Jaeyong Yoo <jaeyong.yoo@xxxxxxxxxxx>
> ---
>  xen/arch/arm/Makefile                  |   1 +
>  xen/arch/arm/domctl.c                  |  88 ++++++++++++-
>  xen/arch/arm/hvm.c                     | 228 
> +++++++++++++++++++++++++++++++++
>  xen/arch/arm/save.c                    |  66 ++++++++++
>  xen/common/Makefile                    |   2 +
>  xen/include/asm-arm/hvm/support.h      |  29 +++++
>  xen/include/public/arch-arm/hvm/save.h |  66 ++++++++++
>  7 files changed, 479 insertions(+), 1 deletion(-)
>  create mode 100644 xen/arch/arm/save.c
>  create mode 100644 xen/include/asm-arm/hvm/support.h
> 
> diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
> index 003ac84..8910a6c 100644
> --- a/xen/arch/arm/Makefile
> +++ b/xen/arch/arm/Makefile
> @@ -31,6 +31,7 @@ obj-y += vuart.o
>  obj-y += hvm.o
>  obj-y += device.o
>  obj-y += decode.o
> +obj-y += save.o
>  
>  #obj-bin-y += ....o
>  
> diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
> index 851ee40..86c07de 100644
> --- a/xen/arch/arm/domctl.c
> +++ b/xen/arch/arm/domctl.c
> @@ -9,12 +9,98 @@
>  #include <xen/lib.h>
>  #include <xen/errno.h>
>  #include <xen/sched.h>
> +#include <xen/hvm/save.h>
> +#include <xen/guest_access.h>
>  #include <public/domctl.h>
>  
>  long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>                      XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
>  {
> -    return -ENOSYS;
> +    long ret = 0;
> +    bool_t copyback = 0;
> +
> +    switch ( domctl->cmd )
> +    {
> +    case XEN_DOMCTL_sethvmcontext:
> +    {
> +        struct hvm_domain_context c = { .size = domctl->u.hvmcontext.size };
> +
> +        ret = -ENOMEM;
> +        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
> +            goto sethvmcontext_out;
> +
> +        ret = -EFAULT;
> +        if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 
> 0 )
> +            goto sethvmcontext_out;
> +
> +        domain_pause(d);
> +        ret = hvm_load(d, &c);
> +        domain_unpause(d);
> +
> +    sethvmcontext_out:
> +        if ( c.data != NULL )
> +            xfree(c.data);
> +    }
> +    break;
> +    case XEN_DOMCTL_gethvmcontext:
> +    {
> +        struct hvm_domain_context c = { 0 };
> +
> +        ret = -EINVAL;
> +
> +        c.size = hvm_save_size(d);
> +
> +        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
> +        {
> +            /* Client is querying for the correct buffer size */
> +            domctl->u.hvmcontext.size = c.size;
> +            ret = 0;
> +            goto gethvmcontext_out;
> +        }
> +        /* Check that the client has a big enough buffer */
> +        ret = -ENOSPC;
> +        if ( domctl->u.hvmcontext.size < c.size )
> +        {
> +            printk("(gethvmcontext) size error: %d and %d\n",
> +                   domctl->u.hvmcontext.size, c.size );
> +            goto gethvmcontext_out;
> +        }
> +
> +        /* Allocate our own marshalling buffer */
> +        ret = -ENOMEM;
> +        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
> +        {
> +            printk("(gethvmcontext) xmalloc_bytes failed: %d\n", c.size );
> +            goto gethvmcontext_out;
> +        }
> +
> +        domain_pause(d);
> +        ret = hvm_save(d, &c);
> +        domain_unpause(d);
> +
> +        domctl->u.hvmcontext.size = c.cur;
> +        if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 
> )
> +        {
> +            printk("(gethvmcontext) copy to guest failed\n");
> +            ret = -EFAULT;
> +        }
> +
> +    gethvmcontext_out:
> +        copyback = 1;
> +
> +        if ( c.data != NULL )
> +            xfree(c.data);
> +    }
> +    break;
> +
> +    default:
> +        return -EINVAL;
> +    }
> +
> +    if ( copyback && __copy_to_guest(u_domctl, domctl, 1) )
> +        ret = -EFAULT;
> +
> +    return ret;
>  }
>  
>  void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
> diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
> index 471c4cd..befd75d 100644
> --- a/xen/arch/arm/hvm.c
> +++ b/xen/arch/arm/hvm.c
> @@ -7,11 +7,13 @@
>  
>  #include <xsm/xsm.h>
>  
> +#include <xen/hvm/save.h>
>  #include <public/xen.h>
>  #include <public/hvm/params.h>
>  #include <public/hvm/hvm_op.h>
>  
>  #include <asm/hypercall.h>
> +#include <asm/gic.h>
>  
>  long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
>  
> @@ -65,3 +67,229 @@ long do_hvm_op(unsigned long op, 
> XEN_GUEST_HANDLE_PARAM(void) arg)
>  
>      return rc;
>  }
> +
> +static void vgic_irq_rank_save(struct vgic_rank *ext,
> +                               struct vgic_irq_rank *rank)
> +{
> +    spin_lock(&rank->lock);
> +    /* Some of VGIC registers are not used yet, it is for a future usage */
> +    /* IENABLE, IACTIVE, IPEND,  PENDSGI registers */
> +    ext->ienable = rank->ienable;
> +    ext->iactive = rank->iactive;
> +    ext->ipend = rank->ipend;
> +    ext->pendsgi = rank->pendsgi;
> +    /* ICFG */
> +    ext->icfg[0] = rank->icfg[0];
> +    ext->icfg[1] = rank->icfg[1];
Can you use memcpy?

> +    /* IPRIORITY */
> +    memcpy(ext->ipriority, rank->ipriority, sizeof(rank->ipriority));
> +    /* ITARGETS */
> +    memcpy(ext->itargets, rank->itargets, sizeof(rank->itargets));
> +    spin_unlock(&rank->lock);
> +}
> +
> +static void vgic_irq_rank_restore(struct vgic_irq_rank *rank,
> +                                  struct vgic_rank *ext)
> +{
> +    spin_lock(&rank->lock);
> +    /* IENABLE, IACTIVE, IPEND,  PENDSGI registers */
> +    rank->ienable = ext->ienable;
> +    rank->iactive = ext->iactive;
> +    rank->ipend = ext->ipend;
> +    rank->pendsgi = ext->pendsgi;
> +    /* ICFG */
> +    rank->icfg[0] = ext->icfg[0];
> +    rank->icfg[1] = ext->icfg[1];

Same here — can you use memcpy for the icfg copy in the restore path as well?

> +    /* IPRIORITY */
> +    memcpy(rank->ipriority, ext->ipriority, sizeof(rank->ipriority));
> +    /* ITARGETS */
> +    memcpy(rank->itargets, ext->itargets, sizeof(rank->itargets));
> +    spin_unlock(&rank->lock);
> +}
> +
> +
> +static int gic_save(struct domain *d, hvm_domain_context_t *h)
> +{
> +    struct hvm_hw_gic ctxt;
> +    struct vcpu *v;
> +
> +    /* Save the state of GICs */
> +    for_each_vcpu( d, v )
> +    {
> +        ctxt.gic_hcr = v->arch.gic_hcr;
> +        ctxt.gic_vmcr = v->arch.gic_vmcr;
> +        ctxt.gic_apr = v->arch.gic_apr;
> +
> +        /* Save list registers and masks */
> +        /* (it is not necessary to save/restore them, but LR state can have
> +         * influence on downtime after Live Migration (to be tested)
> +         */
> +        memcpy(ctxt.gic_lr, v->arch.gic_lr, sizeof(ctxt.gic_lr));
> +        ctxt.lr_mask = v->arch.lr_mask;
> +        ctxt.event_mask = v->arch.event_mask;
> +
> +        /* Save PPI states (per-CPU) */
> +        /* It is necessary if SMP enabled */
> +        vgic_irq_rank_save(&ctxt.ppi_state,  &v->arch.vgic.private_irqs);
> +
> +        if ( hvm_save_entry(GIC, v->vcpu_id, h, &ctxt) != 0 )
> +            return 1;
> +    }
> +    return 0;
> +}
> +
> +static int gic_load(struct domain *d, hvm_domain_context_t *h)
> +{
> +    int vcpuid;
> +    struct hvm_hw_gic ctxt;
> +    struct vcpu *v;
> +
> +    /* Which vcpu is this? */
> +    vcpuid = hvm_load_instance(h);
> +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> +    {
> +        dprintk(XENLOG_G_ERR, "HVM restore: dom%u has no vcpu%u\n",
> +                d->domain_id, vcpuid);
> +        return -EINVAL;
> +    }
> +
> +    if ( hvm_load_entry(GIC, h, &ctxt) != 0 )
> +        return -EINVAL;
> +
> +    v->arch.gic_hcr = ctxt.gic_hcr;
> +    v->arch.gic_vmcr = ctxt.gic_vmcr;
> +    v->arch.gic_apr = ctxt.gic_apr;
> +
> +    /* Restore list registers and masks */
> +    memcpy(v->arch.gic_lr, ctxt.gic_lr, sizeof(ctxt.gic_lr));
> +    v->arch.lr_mask = ctxt.lr_mask;
> +    v->arch.event_mask = ctxt.event_mask;
> +
> +    /* Restore PPI states */
> +    vgic_irq_rank_restore(&v->arch.vgic.private_irqs, &ctxt.ppi_state);
> +
> +    return 0;
> +}
> +
> +HVM_REGISTER_SAVE_RESTORE(GIC, gic_save, gic_load, 1, HVMSR_PER_VCPU);
> +
> +static int timer_save(struct domain *d, hvm_domain_context_t *h)
> +{
> +    struct hvm_hw_timer ctxt;
> +    struct vcpu *v;
> +    struct vtimer *t;
> +    int i;
> +
> +    /* Save the state of vtimer and ptimer */
> +    for_each_vcpu( d, v )
> +    {
> +        t = &v->arch.virt_timer;
> +        for ( i = 0; i < 2; i++ )
> +        {
> +            ctxt.cval = t->cval;
> +            ctxt.ctl = t->ctl;
> +            ctxt.vtb_offset = i ? d->arch.phys_timer_base.offset :
> +                d->arch.virt_timer_base.offset;
> +            ctxt.type = i;
> +            if ( hvm_save_entry(A15_TIMER, v->vcpu_id, h, &ctxt) != 0 )
> +                return 1;
> +            t = &v->arch.phys_timer;
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +static int timer_load(struct domain *d, hvm_domain_context_t *h)
> +{
> +    int vcpuid;
> +    struct hvm_hw_timer ctxt;
> +    struct vcpu *v;
> +    struct vtimer *t = NULL;
> +
> +    /* Which vcpu is this? */
> +    vcpuid = hvm_load_instance(h);
> +
> +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> +    {
> +        dprintk(XENLOG_G_ERR, "HVM restore: dom%u has no vcpu%u\n",
> +                d->domain_id, vcpuid);
> +        return -EINVAL;
> +    }
> +
> +    if ( hvm_load_entry(A15_TIMER, h, &ctxt) != 0 )
> +        return -EINVAL;
> +
> +
> +    if ( ctxt.type == TIMER_TYPE_VIRT )
> +    {
> +        t = &v->arch.virt_timer;
> +        d->arch.virt_timer_base.offset = ctxt.vtb_offset;
> +
> +    }
> +    else
> +    {
> +        t = &v->arch.phys_timer;
> +        d->arch.phys_timer_base.offset = ctxt.vtb_offset;
> +    }
> +
> +    t->cval = ctxt.cval;
> +    t->ctl = ctxt.ctl;
> +    t->v = v;
> +
> +    return 0;
> +}
> +
> +HVM_REGISTER_SAVE_RESTORE(A15_TIMER, timer_save, timer_load, 2, 
> HVMSR_PER_VCPU);
> +
> +static int vfp_save(struct domain *d, hvm_domain_context_t *h)
> +{
> +    struct hvm_hw_vfp ctxt;
> +    struct vcpu *v;
> +
> +    /* Save the state of VFP */
> +    for_each_vcpu( d, v )
> +    {
> +        if ( sizeof(v->arch.vfp) != sizeof (ctxt) )
> +        {
> +            dprintk(XENLOG_G_ERR, "HVM: check VFP structure match\n");
> +            return -EINVAL;
> +        }
> +        memcpy(&ctxt, &v->arch.vfp, sizeof(ctxt));
> +
> +        if ( hvm_save_entry(VFP, v->vcpu_id, h, &ctxt) != 0 )
> +            return 1;
> +    }
> +    return 0;
> +}
> +
> +static int vfp_load(struct domain *d, hvm_domain_context_t *h)
> +{
> +    int vcpuid;
> +    struct hvm_hw_vfp ctxt;
> +    struct vcpu *v;
> +
> +    /* Which vcpu is this? */
> +    vcpuid = hvm_load_instance(h);
> +    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> +    {
> +        dprintk(XENLOG_G_ERR, "HVM restore: dom%u has no vcpu%u\n",
> +                d->domain_id, vcpuid);
> +        return -EINVAL;
> +    }
> +
> +    if ( sizeof(v->arch.vfp) != sizeof (ctxt) )
> +    {
> +        dprintk(XENLOG_G_ERR, "HVM: check VFP structure match\n");
> +        return -EINVAL;
> +    }
> +
> +    if ( hvm_load_entry(VFP, h, &ctxt) != 0 )
> +        return -EINVAL;
> +
> +    memcpy(&v->arch.vfp, &ctxt, sizeof(ctxt));
> +
> +    return 0;
> +}
> +
> +HVM_REGISTER_SAVE_RESTORE(VFP, vfp_save, vfp_load, 1, HVMSR_PER_VCPU);
> diff --git a/xen/arch/arm/save.c b/xen/arch/arm/save.c
> new file mode 100644
> index 0000000..c923910
> --- /dev/null
> +++ b/xen/arch/arm/save.c
> @@ -0,0 +1,66 @@
> +/*
> + * hvm/save.c: Save and restore HVM guest's emulated hardware state for ARM.
> + *
> + * Copyright (c) 2013, Samsung Electronics.
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms and conditions of the GNU General Public License,
> + * version 2, as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along 
> with
> + * this program; if not, write to the Free Software Foundation, Inc., 59 
> Temple
> + * Place - Suite 330, Boston, MA 02111-1307 USA.
> + */
> +
> +#include <asm/hvm/support.h>
> +#include <public/hvm/save.h>
> +
> +void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr)
> +{
> +    hdr->cpuid = READ_SYSREG32(MIDR_EL1);
> +}
> +
> +int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
> +{
> +    uint32_t cpuid;
> +
> +    if ( hdr->magic != HVM_FILE_MAGIC )
> +    {
> +        printk(XENLOG_G_ERR "HVM%d restore: bad magic number %#"PRIx32"\n",
> +               d->domain_id, hdr->magic);
> +        return -1;
> +    }
> +
> +    if ( hdr->version != HVM_FILE_VERSION )
> +    {
> +        printk(XENLOG_G_ERR "HVM%d restore: unsupported version %u\n",
> +               d->domain_id, hdr->version);
> +        return -1;
> +    }
> +
> +    cpuid = READ_SYSREG32(MIDR_EL1);
> +    if ( hdr->cpuid != cpuid )
> +    {
> +        printk(XENLOG_G_INFO "HVM%d restore: VM saved on one CPU "
> +               "(%#"PRIx32") and restored on another (%#"PRIx32").\n",
> +               d->domain_id, hdr->cpuid, cpuid);
> +        return -1;
> +    }
> +
> +    return 0;
> +}
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * tab-width: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/common/Makefile b/xen/common/Makefile
> index 6da4651..733b19f 100644
> --- a/xen/common/Makefile
> +++ b/xen/common/Makefile
> @@ -61,6 +61,8 @@ subdir-$(CONFIG_COMPAT) += compat
>  
>  subdir-$(x86_64) += hvm
>  
> +subdir-$(CONFIG_ARM) += hvm
> +
>  subdir-$(coverage) += gcov
>  
>  subdir-y += libelf
> diff --git a/xen/include/asm-arm/hvm/support.h 
> b/xen/include/asm-arm/hvm/support.h
> new file mode 100644
> index 0000000..8311f2f
> --- /dev/null
> +++ b/xen/include/asm-arm/hvm/support.h
> @@ -0,0 +1,29 @@
> +/*
> + * support.h: HVM support routines used by ARMv7 VE.
> + *
> + * Copyright (c) 2012, Citrix Systems
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms and conditions of the GNU General Public License,
> + * version 2, as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along 
> with
> + * this program; if not, write to the Free Software Foundation, Inc., 59 
> Temple
> + * Place - Suite 330, Boston, MA 02111-1307 USA.
> + */
> +
> +#ifndef __ASM_ARM_HVM_SUPPORT_H__
> +#define __ASM_ARM_HVM_SUPPORT_H__
> +
> +#include <xen/types.h>
> +#include <public/hvm/ioreq.h>
> +#include <xen/sched.h>
> +#include <xen/hvm/save.h>
> +#include <asm/processor.h>
> +
> +#endif /* __ASM_ARM_HVM_SUPPORT_H__ */
> diff --git a/xen/include/public/arch-arm/hvm/save.h 
> b/xen/include/public/arch-arm/hvm/save.h
> index 75b8e65..7e55384 100644
> --- a/xen/include/public/arch-arm/hvm/save.h
> +++ b/xen/include/public/arch-arm/hvm/save.h
> @@ -26,6 +26,72 @@
>  #ifndef __XEN_PUBLIC_HVM_SAVE_ARM_H__
>  #define __XEN_PUBLIC_HVM_SAVE_ARM_H__
>  
> +#define HVM_FILE_MAGIC   0x92385520
> +#define HVM_FILE_VERSION 0x00000001
> +
> +
> +struct hvm_save_header
> +{
> +    uint32_t magic;             /* Must be HVM_FILE_MAGIC */
> +    uint32_t version;           /* File format version */
> +    uint64_t changeset;         /* Version of Xen that saved this file */
> +    uint32_t cpuid;             /* MIDR_EL1 on the saving machine */
> +};
> +
> +DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
> +
> +struct vgic_rank
> +{
> +    uint32_t ienable, iactive, ipend, pendsgi;
> +    uint32_t icfg[2];
> +    uint32_t ipriority[8];
> +    uint32_t itargets[8];
> +};
> +
> +struct hvm_hw_gic
> +{
> +    uint32_t gic_hcr;
> +    uint32_t gic_vmcr;
> +    uint32_t gic_apr;
> +    uint32_t gic_lr[64];
> +    uint64_t event_mask;
> +    uint64_t lr_mask;
> +    struct vgic_rank ppi_state;
> +};
> +
> +DECLARE_HVM_SAVE_TYPE(GIC, 2, struct hvm_hw_gic);
> +
> +#define TIMER_TYPE_VIRT 0
> +#define TIMER_TYPE_PHYS 1
> +
> +struct hvm_hw_timer
> +{
> +    uint64_t vtb_offset;
> +    uint32_t ctl;
> +    uint64_t cval;
> +    uint32_t type;
> +};
> +
> +DECLARE_HVM_SAVE_TYPE(A15_TIMER, 3, struct hvm_hw_timer);
> +
> +struct hvm_hw_vfp
> +{
> +    uint64_t fpregs1[16]; /* {d0-d15} */
> +    uint64_t fpregs2[16]; /* {d16-d31} */
> +    uint32_t fpexc;
> +    uint32_t fpscr;
> +    /* VFP implementation specific state */
> +    uint32_t fpinst;
> +    uint32_t fpinst2;
> +};
> +
> +DECLARE_HVM_SAVE_TYPE(VFP, 4, struct hvm_hw_vfp);
> +
> +/*
> + * Largest type-code in use
> + */
> +#define HVM_SAVE_CODE_MAX 4
> +
>  #endif
>  
>  /*
> 


-- 
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365, backed by RackSpace's Fanatical Support®.