[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [RFC v3 4/6] xen/arm: Add save/restore support for guest core registers
This patch implements save/restore support for ARM guest core registers.

Signed-off-by: Evgeny Fedotov <e.fedotov@xxxxxxxxxxx>
Signed-off-by: Wei Huang <w1.huang@xxxxxxxxxxx>
---
 xen/arch/arm/hvm.c                     |  263 +++++++++++++++++++++++++++++++-
 xen/include/public/arch-arm/hvm/save.h |  121 ++++++++++++++-
 2 files changed, 382 insertions(+), 2 deletions(-)

diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
index 471c4cd..7bfa547 100644
--- a/xen/arch/arm/hvm.c
+++ b/xen/arch/arm/hvm.c
@@ -7,14 +7,15 @@
 
 #include <xsm/xsm.h>
 
+#include <xen/hvm/save.h>
 #include <public/xen.h>
 #include <public/hvm/params.h>
 #include <public/hvm/hvm_op.h>
 
 #include <asm/hypercall.h>
+#include <asm/gic.h>
 
 long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
-
 {
     long rc = 0;
 
@@ -65,3 +66,263 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 
     return rc;
 }
+
+static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+{
+    struct hvm_arm_cpu ctxt;
+    struct vcpu_guest_core_regs c;
+    struct vcpu *v;
+
+    /* Save the state of CPU */
+    for_each_vcpu( d, v )
+    {
+        memset(&ctxt, 0, sizeof(ctxt));
+
+        ctxt.sctlr = v->arch.sctlr;
+        ctxt.ttbr0 = v->arch.ttbr0;
+        ctxt.ttbr1 = v->arch.ttbr1;
+        ctxt.ttbcr = v->arch.ttbcr;
+
+        ctxt.dacr = v->arch.dacr;
+        ctxt.ifsr = v->arch.ifsr;
+#ifdef CONFIG_ARM_32
+        ctxt.ifar = v->arch.ifar;
+        ctxt.dfar = v->arch.dfar;
+        ctxt.dfsr = v->arch.dfsr;
+#else
+        ctxt.far = v->arch.far;
+        ctxt.esr = v->arch.esr;
+#endif
+
+#ifdef CONFIG_ARM_32
+        ctxt.mair0 = v->arch.mair0;
+        ctxt.mair1 = v->arch.mair1;
+#else
+        ctxt.mair0 = v->arch.mair;
+#endif
+        /* Control Registers */
+        ctxt.actlr = v->arch.actlr;
+        ctxt.sctlr = v->arch.sctlr;
+        ctxt.cpacr = v->arch.cpacr;
+
+        ctxt.contextidr = v->arch.contextidr;
+        ctxt.tpidr_el0 = v->arch.tpidr_el0;
+        ctxt.tpidr_el1 = v->arch.tpidr_el1;
+        ctxt.tpidrro_el0 = v->arch.tpidrro_el0;
+
+        /* CP 15 */
+        ctxt.csselr = v->arch.csselr;
+
+        ctxt.afsr0 = v->arch.afsr0;
+        ctxt.afsr1 = v->arch.afsr1;
+        ctxt.vbar = v->arch.vbar;
+        ctxt.par = v->arch.par;
+        ctxt.teecr = v->arch.teecr;
+        ctxt.teehbr = v->arch.teehbr;
+
+#ifdef CONFIG_ARM_32
+        ctxt.joscr = v->arch.joscr;
+        ctxt.jmcr = v->arch.jmcr;
+#endif
+
+        memset(&c, 0, sizeof(c));
+
+        /* get guest core registers */
+        vcpu_regs_hyp_to_user(v, &c);
+
+        ctxt.x0 = c.x0;
+        ctxt.x1 = c.x1;
+        ctxt.x2 = c.x2;
+        ctxt.x3 = c.x3;
+        ctxt.x4 = c.x4;
+        ctxt.x5 = c.x5;
+        ctxt.x6 = c.x6;
+        ctxt.x7 = c.x7;
+        ctxt.x8 = c.x8;
+        ctxt.x9 = c.x9;
+        ctxt.x10 = c.x10;
+        ctxt.x11 = c.x11;
+        ctxt.x12 = c.x12;
+        ctxt.x13 = c.x13;
+        ctxt.x14 = c.x14;
+        ctxt.x15 = c.x15;
+        ctxt.x16 = c.x16;
+        ctxt.x17 = c.x17;
+        ctxt.x18 = c.x18;
+        ctxt.x19 = c.x19;
+        ctxt.x20 = c.x20;
+        ctxt.x21 = c.x21;
+        ctxt.x22 = c.x22;
+        ctxt.x23 = c.x23;
+        ctxt.x24 = c.x24;
+        ctxt.x25 = c.x25;
+        ctxt.x26 = c.x26;
+        ctxt.x27 = c.x27;
+        ctxt.x28 = c.x28;
+        ctxt.x29 = c.x29;
+        ctxt.x30 = c.x30;
+        ctxt.pc64 = c.pc64;
+        ctxt.cpsr = c.cpsr;
+        ctxt.spsr_el1 = c.spsr_el1; /* spsr_svc */
+
+#ifdef CONFIG_ARM_32
+        ctxt.spsr_fiq = c.spsr_fiq;
+        ctxt.spsr_irq = c.spsr_irq;
+        ctxt.spsr_und = c.spsr_und;
+        ctxt.spsr_abt = c.spsr_abt;
+#endif
+#ifdef CONFIG_ARM_64
+        ctxt.sp_el0 = c.sp_el0;
+        ctxt.sp_el1 = c.sp_el1;
+        ctxt.elr_el1 = c.elr_el1;
+#endif
+
+        /* check VFP state size before dumping */
+        BUILD_BUG_ON(sizeof(v->arch.vfp) > sizeof (ctxt.vfp_state));
+        memcpy((void*) &ctxt.vfp_state, (void*) &v->arch.vfp,
+               sizeof(v->arch.vfp));
+
+        if ( hvm_save_entry(VCPU, v->vcpu_id, h, &ctxt) != 0 )
+            return 1;
+    }
+
+    return 0;
+}
+
+static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+{
+    int vcpuid;
+    struct hvm_arm_cpu ctxt;
+    struct vcpu *v;
+    struct vcpu_guest_core_regs c;
+
+    /* Which vcpu is this? */
+    vcpuid = hvm_load_instance(h);
+    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+    {
+        dprintk(XENLOG_G_ERR, "HVM restore: dom%u has no vcpu%u\n",
+                d->domain_id, vcpuid);
+        return -EINVAL;
+    }
+
+    if ( hvm_load_entry(VCPU, h, &ctxt) != 0 )
+        return -EINVAL;
+
+    v->arch.sctlr = ctxt.sctlr;
+    v->arch.ttbr0 = ctxt.ttbr0;
+    v->arch.ttbr1 = ctxt.ttbr1;
+    v->arch.ttbcr = ctxt.ttbcr;
+
+    v->arch.dacr = ctxt.dacr;
+    v->arch.ifsr = ctxt.ifsr;
+#ifdef CONFIG_ARM_32
+    v->arch.ifar = ctxt.ifar;
+    v->arch.dfar = ctxt.dfar;
+    v->arch.dfsr = ctxt.dfsr;
+#else
+    v->arch.far = ctxt.far;
+    v->arch.esr = ctxt.esr;
+#endif
+
+#ifdef CONFIG_ARM_32
+    v->arch.mair0 = ctxt.mair0;
+    v->arch.mair1 = ctxt.mair1;
+#else
+    v->arch.mair = ctxt.mair0;
+#endif
+
+    /* Control Registers */
+    v->arch.actlr = ctxt.actlr;
+    v->arch.cpacr = ctxt.cpacr;
+    v->arch.contextidr = ctxt.contextidr;
+    v->arch.tpidr_el0 = ctxt.tpidr_el0;
+    v->arch.tpidr_el1 = ctxt.tpidr_el1;
+    v->arch.tpidrro_el0 = ctxt.tpidrro_el0;
+
+    /* CP 15 */
+    v->arch.csselr = ctxt.csselr;
+
+    v->arch.afsr0 = ctxt.afsr0;
+    v->arch.afsr1 = ctxt.afsr1;
+    v->arch.vbar = ctxt.vbar;
+    v->arch.par = ctxt.par;
+    v->arch.teecr = ctxt.teecr;
+    v->arch.teehbr = ctxt.teehbr;
+#ifdef CONFIG_ARM_32
+    v->arch.joscr = ctxt.joscr;
+    v->arch.jmcr = ctxt.jmcr;
+#endif
+
+    /* fill guest core registers */
+    memset(&c, 0, sizeof(c));
+    c.x0 = ctxt.x0;
+    c.x1 = ctxt.x1;
+    c.x2 = ctxt.x2;
+    c.x3 = ctxt.x3;
+    c.x4 = ctxt.x4;
+    c.x5 = ctxt.x5;
+    c.x6 = ctxt.x6;
+    c.x7 = ctxt.x7;
+    c.x8 = ctxt.x8;
+    c.x9 = ctxt.x9;
+    c.x10 = ctxt.x10;
+    c.x11 = ctxt.x11;
+    c.x12 = ctxt.x12;
+    c.x13 = ctxt.x13;
+    c.x14 = ctxt.x14;
+    c.x15 = ctxt.x15;
+    c.x16 = ctxt.x16;
+    c.x17 = ctxt.x17;
+    c.x18 = ctxt.x18;
+    c.x19 = ctxt.x19;
+    c.x20 = ctxt.x20;
+    c.x21 = ctxt.x21;
+    c.x22 = ctxt.x22;
+    c.x23 = ctxt.x23;
+    c.x24 = ctxt.x24;
+    c.x25 = ctxt.x25;
+    c.x26 = ctxt.x26;
+    c.x27 = ctxt.x27;
+    c.x28 = ctxt.x28;
+    c.x29 = ctxt.x29;
+    c.x30 = ctxt.x30;
+    c.pc64 = ctxt.pc64;
+    c.cpsr = ctxt.cpsr;
+    c.spsr_el1 = ctxt.spsr_el1; /* spsr_svc */
+
+#ifdef CONFIG_ARM_32
+    c.spsr_fiq = ctxt.spsr_fiq;
+    c.spsr_irq = ctxt.spsr_irq;
+    c.spsr_und = ctxt.spsr_und;
+    c.spsr_abt = ctxt.spsr_abt;
+#endif
+#ifdef CONFIG_ARM_64
+    c.sp_el0 = ctxt.sp_el0;
+    c.sp_el1 = ctxt.sp_el1;
+    c.elr_el1 = ctxt.elr_el1;
+#endif
+
+    /* set guest core registers */
+    vcpu_regs_user_to_hyp(v, &c);
+
+    BUILD_BUG_ON(sizeof(v->arch.vfp) > sizeof (ctxt.vfp_state));
+    /* Restore from the vfp_state member, not the record base: relying on
+     * vfp_state being the first member of struct hvm_arm_cpu is fragile. */
+    memcpy(&v->arch.vfp, &ctxt.vfp_state, sizeof(v->arch.vfp));
+
+    v->is_initialised = 1;
+    clear_bit(_VPF_down, &v->pause_flags);
+
+    return 0;
+}
+
+HVM_REGISTER_SAVE_RESTORE(VCPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt, 1,
+                          HVMSR_PER_VCPU);
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/public/arch-arm/hvm/save.h b/xen/include/public/arch-arm/hvm/save.h
index 8679bfd..18e5899 100644
--- a/xen/include/public/arch-arm/hvm/save.h
+++ b/xen/include/public/arch-arm/hvm/save.h
@@ -86,10 +86,129 @@ struct hvm_arm_timer
 };
 DECLARE_HVM_SAVE_TYPE(TIMER, 4, struct hvm_arm_timer);
 
+/* ARM core hardware info */
+struct hvm_arm_cpu
+{
+    /* ======= Guest VFP State =======
+     *   - 34 8-bytes required for AArch32 guests
+     *   - 66 8-bytes required for AArch64 guests
+     */
+    uint64_t vfp_state[66];
+
+    /* ======= Guest Core Registers =======
+     *   - Each reg is multiplexed for AArch64 and AArch32 guests, if possible
+     *   - Each comments, /AArch64_reg, AArch32_reg/, describes its
+     *     corresponding 64- and 32-bit register name. "NA" means
+     *     "Not Applicable".
+     *   - Check "struct vcpu_guest_core_regs" for details.
+     */
+    uint64_t x0;          /* x0, r0_usr */
+    uint64_t x1;          /* x1, r1_usr */
+    uint64_t x2;          /* x2, r2_usr */
+    uint64_t x3;          /* x3, r3_usr */
+    uint64_t x4;          /* x4, r4_usr */
+    uint64_t x5;          /* x5, r5_usr */
+    uint64_t x6;          /* x6, r6_usr */
+    uint64_t x7;          /* x7, r7_usr */
+    uint64_t x8;          /* x8, r8_usr */
+    uint64_t x9;          /* x9, r9_usr */
+    uint64_t x10;         /* x10, r10_usr */
+    uint64_t x11;         /* x11, r11_usr */
+    uint64_t x12;         /* x12, r12_usr */
+    uint64_t x13;         /* x13, sp_usr */
+    uint64_t x14;         /* x14, lr_usr; */
+    uint64_t x15;         /* x15, __unused_sp_hyp */
+    uint64_t x16;         /* x16, lr_irq */
+    uint64_t x17;         /* x17, sp_irq */
+    uint64_t x18;         /* x18, lr_svc */
+    uint64_t x19;         /* x19, sp_svc */
+    uint64_t x20;         /* x20, lr_abt */
+    uint64_t x21;         /* x21, sp_abt */
+    uint64_t x22;         /* x22, lr_und */
+    uint64_t x23;         /* x23, sp_und */
+    uint64_t x24;         /* x24, r8_fiq */
+    uint64_t x25;         /* x25, r9_fiq */
+    uint64_t x26;         /* x26, r10_fiq */
+    uint64_t x27;         /* x27, r11_fiq */
+    uint64_t x28;         /* x28, r12_fiq */
+    uint64_t x29;         /* fp, sp_fiq */
+    uint64_t x30;         /* lr, lr_fiq */
+
+    /* return address (EL1 ==> EL0) */
+    uint64_t elr_el1;     /* elr_el1, NA */
+    /* return address (EL2 ==> EL1) */
+    uint64_t pc64;        /* elr_el2, elr_el2 */
+
+    /* spsr registers */
+    uint32_t spsr_el1;    /* spsr_el1, spsr_svc */
+    uint32_t spsr_fiq;    /* NA, spsr_fiq */
+    uint32_t spsr_irq;    /* NA, spsr_irq */
+    uint32_t spsr_und;    /* NA, spsr_und */
+    uint32_t spsr_abt;    /* NA, spsr_abt */
+
+    /* stack pointers */
+    uint64_t sp_el0;      /* sp_el0, NA */
+    uint64_t sp_el1;      /* sp_el1, NA */
+
+    /* guest mode */
+    uint32_t cpsr;        /* spsr_el2, spsr_el2 */
+
+    /* ======= Guest System Registers =======
+     *   - multiplexed for AArch32 and AArch64 guests
+     *   - 64-bit preferred if needed (for 64-bit guests)
+     *   - architecture specific registers are noted specifically
+     */
+    /* exception */
+    uint64_t vbar;        /* vbar, vbar */
+
+    /* mmu related */
+    uint64_t ttbcr;       /* ttbcr, ttbcr */
+    uint64_t ttbr0;       /* ttbr0, ttbr0 */
+    uint64_t ttbr1;       /* ttbr1, ttbr1 */
+    uint32_t dacr;        /* NA, dacr32 */
+
+    uint64_t par;         /* par, par */
+    uint64_t mair0;       /* mair, mair0 */
+    uint64_t mair1;       /* NA, mair1 */
+
+    /* fault status */
+    uint32_t ifar;        /* ifar, ifar */
+    uint32_t ifsr;        /* ifsr, ifsr */
+    uint32_t dfar;        /* dfar, dfar */
+    uint32_t dfsr;        /* dfsr, dfsr */
+
+    uint64_t far;         /* far, far */
+    uint64_t esr;         /* esr, esr */
+
+    uint32_t afsr0;       /* afsr0, afsr0 */
+    uint32_t afsr1;       /* afsr1, afsr1 */
+
+    /* thumbee and jazelle */
+    uint32_t teecr;       /* NA, teecr */
+    uint32_t teehbr;      /* NA, teehbr */
+
+    uint32_t joscr;       /* NA, joscr */
+    uint32_t jmcr;        /* NA, jmcr */
+
+    /* control registers */
+    uint32_t sctlr;       /* sctlr, sctlr */
+    uint32_t actlr;       /* actlr, actlr */
+    uint32_t cpacr;       /* cpacr, cpacr */
+
+    uint32_t csselr;      /* csselr, csselr */
+
+    /* software management related */
+    uint32_t contextidr;  /* contextidr, contextidr */
+    uint64_t tpidr_el0;   /* tpidr_el0, tpidr_el0 */
+    uint64_t tpidr_el1;   /* tpidr_el1, tpidr_el1 */
+    uint64_t tpidrro_el0; /* tpidrro_el0, tpidrro_el0 */
+};
+DECLARE_HVM_SAVE_TYPE(VCPU, 5, struct hvm_arm_cpu);
+
 /*
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 4
+#define HVM_SAVE_CODE_MAX 5
 
 #endif
-- 
1.7.9.5

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |