[Xen-devel] [PATCH v5 8/8] x86/domctl: Don't pause the whole domain if only getting vcpu state
This patch merges the *save() functions into their *save_one() counterparts to remove redundancy. The for loop is moved to the caller, so state can now be saved for a single vcpu instance.

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>

---
Changes since V4:
        - Save functions are deleted
        - vcpu is added to the param list
        - save_one functions are now static
        - Add blank line after the "/* save mtrr&pat */" comment
---
 xen/arch/x86/cpu/mcheck/vmce.c |  28 ++--
 xen/arch/x86/hvm/hpet.c        |   3 +-
 xen/arch/x86/hvm/hvm.c         | 287 +++++++++++++++++------------------------
 xen/arch/x86/hvm/i8254.c       |   3 +-
 xen/arch/x86/hvm/irq.c         |   9 +-
 xen/arch/x86/hvm/mtrr.c        |  36 ++----
 xen/arch/x86/hvm/pmtimer.c     |   3 +-
 xen/arch/x86/hvm/rtc.c         |   3 +-
 xen/arch/x86/hvm/save.c        | 125 +++++++++++++-----
 xen/arch/x86/hvm/vioapic.c     |   3 +-
 xen/arch/x86/hvm/viridian.c    |  30 ++---
 xen/arch/x86/hvm/vlapic.c      |  28 ++--
 xen/arch/x86/hvm/vpic.c        |   3 +-
 xen/include/asm-x86/hvm/save.h |   2 +-
 14 files changed, 271 insertions(+), 292 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 404f27e..7bde9ff 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -349,29 +349,17 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
     return ret;
 }
 
-static void vmce_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_vmce_vcpu *ctxt)
+static int vmce_save_vcpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
 {
-    ctxt->caps = v->arch.vmce.mcg_cap;
-    ctxt->mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
-    ctxt->mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
-    ctxt->mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
-}
-
-static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
     int err = 0;
+    struct hvm_vmce_vcpu ctxt;
 
-    for_each_vcpu ( d, v )
-    {
-        struct hvm_vmce_vcpu ctxt;
-
-        vmce_save_vcpu_ctxt_one(v, &ctxt);
-        err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
-        if ( err )
-            break;
-    }
+    ctxt.caps = v->arch.vmce.mcg_cap;
+    ctxt.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
+    ctxt.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
+    ctxt.mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
+    err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
 
     return err;
 }
@@ -394,7 +382,7 @@ static int vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     return err ?: vmce_restore_vcpu(v, &ctxt);
 }
 
-HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt,
+HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt_one,
                           vmce_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
 
 /*
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 2837709..3ed6547 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -516,8 +516,9 @@ static const struct hvm_mmio_ops hpet_mmio_ops = {
 };
 
 
-static int hpet_save(struct domain *d, hvm_domain_context_t *h)
+static int hpet_save(struct vcpu *vcpu, hvm_domain_context_t *h)
 {
+    struct domain *d = vcpu->domain;
     HPETState *hp = domain_vhpet(d);
     struct vcpu *v = pt_global_vcpu_target(d);
     int rc;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4a22283..ea27055 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -740,24 +740,13 @@ void hvm_domain_destroy(struct domain *d)
     destroy_vpci_mmcfg(d);
 }
 
-static void hvm_save_tsc_adjust_one(struct vcpu *v, struct hvm_tsc_adjust *ctxt)
+static int hvm_save_tsc_adjust_one(struct vcpu *v, hvm_domain_context_t *h)
 {
-    ctxt->tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
-}
-
-static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
-    struct hvm_tsc_adjust ctxt;
     int err = 0;
+    struct hvm_tsc_adjust ctxt;
 
-    for_each_vcpu ( d, v )
-    {
-        hvm_save_tsc_adjust_one(v, &ctxt);
-        err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
-        if ( err )
-            break;
-    }
+    ctxt.tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
+    err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
 
     return err;
 }
@@ -782,126 +771,116 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
+HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust_one,
                           hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
 
-static void hvm_save_cpu_ctxt_one(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+static int hvm_save_cpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct segment_register seg;
+    struct hvm_hw_cpu ctxt = {};
+
+    /* We don't need to save state for a vcpu that is down; the restore
+     * code will leave it down if there is nothing saved. */
+    if ( v->pause_flags & VPF_down )
+        return 2;
 
     /* Architecture-specific vmcs/vmcb bits */
-    hvm_funcs.save_cpu_ctxt(v, ctxt);
+    hvm_funcs.save_cpu_ctxt(v, &ctxt);
 
-    ctxt->tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm_domain.sync_tsc);
+    ctxt.tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm_domain.sync_tsc);
 
-    ctxt->msr_tsc_aux = hvm_msr_tsc_aux(v);
+    ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
 
     hvm_get_segment_register(v, x86_seg_idtr, &seg);
-    ctxt->idtr_limit = seg.limit;
-    ctxt->idtr_base = seg.base;
+    ctxt.idtr_limit = seg.limit;
+    ctxt.idtr_base = seg.base;
 
     hvm_get_segment_register(v, x86_seg_gdtr, &seg);
-    ctxt->gdtr_limit = seg.limit;
-    ctxt->gdtr_base = seg.base;
+    ctxt.gdtr_limit = seg.limit;
+    ctxt.gdtr_base = seg.base;
 
     hvm_get_segment_register(v, x86_seg_cs, &seg);
-    ctxt->cs_sel = seg.sel;
-    ctxt->cs_limit = seg.limit;
-    ctxt->cs_base = seg.base;
-    ctxt->cs_arbytes = seg.attr;
+    ctxt.cs_sel = seg.sel;
+    ctxt.cs_limit = seg.limit;
+    ctxt.cs_base = seg.base;
+    ctxt.cs_arbytes = seg.attr;
 
     hvm_get_segment_register(v, x86_seg_ds, &seg);
-    ctxt->ds_sel = seg.sel;
-    ctxt->ds_limit = seg.limit;
-    ctxt->ds_base = seg.base;
-    ctxt->ds_arbytes = seg.attr;
+    ctxt.ds_sel = seg.sel;
+    ctxt.ds_limit = seg.limit;
+    ctxt.ds_base = seg.base;
+    ctxt.ds_arbytes = seg.attr;
 
     hvm_get_segment_register(v, x86_seg_es, &seg);
-    ctxt->es_sel = seg.sel;
-    ctxt->es_limit = seg.limit;
-    ctxt->es_base = seg.base;
-    ctxt->es_arbytes = seg.attr;
+    ctxt.es_sel = seg.sel;
+    ctxt.es_limit = seg.limit;
+    ctxt.es_base = seg.base;
+    ctxt.es_arbytes = seg.attr;
 
     hvm_get_segment_register(v, x86_seg_ss, &seg);
-    ctxt->ss_sel = seg.sel;
-    ctxt->ss_limit = seg.limit;
-    ctxt->ss_base = seg.base;
-    ctxt->ss_arbytes = seg.attr;
+    ctxt.ss_sel = seg.sel;
+    ctxt.ss_limit = seg.limit;
+    ctxt.ss_base = seg.base;
+    ctxt.ss_arbytes = seg.attr;
 
     hvm_get_segment_register(v, x86_seg_fs, &seg);
-    ctxt->fs_sel = seg.sel;
-    ctxt->fs_limit = seg.limit;
-    ctxt->fs_base = seg.base;
-    ctxt->fs_arbytes = seg.attr;
+    ctxt.fs_sel = seg.sel;
+    ctxt.fs_limit = seg.limit;
+    ctxt.fs_base = seg.base;
+    ctxt.fs_arbytes = seg.attr;
 
     hvm_get_segment_register(v, x86_seg_gs, &seg);
-    ctxt->gs_sel = seg.sel;
-    ctxt->gs_limit = seg.limit;
-    ctxt->gs_base = seg.base;
-    ctxt->gs_arbytes = seg.attr;
+    ctxt.gs_sel = seg.sel;
+    ctxt.gs_limit = seg.limit;
+    ctxt.gs_base = seg.base;
+    ctxt.gs_arbytes = seg.attr;
 
     hvm_get_segment_register(v, x86_seg_tr, &seg);
-    ctxt->tr_sel = seg.sel;
-    ctxt->tr_limit = seg.limit;
-    ctxt->tr_base = seg.base;
-    ctxt->tr_arbytes = seg.attr;
+    ctxt.tr_sel = seg.sel;
+    ctxt.tr_limit = seg.limit;
+    ctxt.tr_base = seg.base;
+    ctxt.tr_arbytes = seg.attr;
 
     hvm_get_segment_register(v, x86_seg_ldtr, &seg);
-    ctxt->ldtr_sel = seg.sel;
-    ctxt->ldtr_limit = seg.limit;
-    ctxt->ldtr_base = seg.base;
-    ctxt->ldtr_arbytes = seg.attr;
+    ctxt.ldtr_sel = seg.sel;
+    ctxt.ldtr_limit = seg.limit;
+    ctxt.ldtr_base = seg.base;
+    ctxt.ldtr_arbytes = seg.attr;
 
     if ( v->fpu_initialised )
     {
-        memcpy(ctxt->fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt->fpu_regs));
-        ctxt->flags = XEN_X86_FPU_INITIALISED;
-    }
-
-    ctxt->rax = v->arch.user_regs.rax;
-    ctxt->rbx = v->arch.user_regs.rbx;
-    ctxt->rcx = v->arch.user_regs.rcx;
-    ctxt->rdx = v->arch.user_regs.rdx;
-    ctxt->rbp = v->arch.user_regs.rbp;
-    ctxt->rsi = v->arch.user_regs.rsi;
-    ctxt->rdi = v->arch.user_regs.rdi;
-    ctxt->rsp = v->arch.user_regs.rsp;
-    ctxt->rip = v->arch.user_regs.rip;
-    ctxt->rflags = v->arch.user_regs.rflags;
-    ctxt->r8 = v->arch.user_regs.r8;
-    ctxt->r9 = v->arch.user_regs.r9;
-    ctxt->r10 = v->arch.user_regs.r10;
-    ctxt->r11 = v->arch.user_regs.r11;
-    ctxt->r12 = v->arch.user_regs.r12;
-    ctxt->r13 = v->arch.user_regs.r13;
-    ctxt->r14 = v->arch.user_regs.r14;
-    ctxt->r15 = v->arch.user_regs.r15;
-    ctxt->dr0 = v->arch.debugreg[0];
-    ctxt->dr1 = v->arch.debugreg[1];
-    ctxt->dr2 = v->arch.debugreg[2];
-    ctxt->dr3 = v->arch.debugreg[3];
-    ctxt->dr6 = v->arch.debugreg[6];
-    ctxt->dr7 = v->arch.debugreg[7];
-}
-
-static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
-    struct hvm_hw_cpu ctxt;
-
-    for_each_vcpu ( d, v )
-    {
-        /* We don't need to save state for a vcpu that is down; the restore
-         * code will leave it down if there is nothing saved. */
-        if ( v->pause_flags & VPF_down )
-            continue;
-
-        memset(&ctxt, 0, sizeof(ctxt));
-        hvm_save_cpu_ctxt_one(v, &ctxt);
+        memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
+        ctxt.flags = XEN_X86_FPU_INITIALISED;
+    }
+
+    ctxt.rax = v->arch.user_regs.rax;
+    ctxt.rbx = v->arch.user_regs.rbx;
+    ctxt.rcx = v->arch.user_regs.rcx;
+    ctxt.rdx = v->arch.user_regs.rdx;
+    ctxt.rbp = v->arch.user_regs.rbp;
+    ctxt.rsi = v->arch.user_regs.rsi;
+    ctxt.rdi = v->arch.user_regs.rdi;
+    ctxt.rsp = v->arch.user_regs.rsp;
+    ctxt.rip = v->arch.user_regs.rip;
+    ctxt.rflags = v->arch.user_regs.rflags;
+    ctxt.r8 = v->arch.user_regs.r8;
+    ctxt.r9 = v->arch.user_regs.r9;
+    ctxt.r10 = v->arch.user_regs.r10;
+    ctxt.r11 = v->arch.user_regs.r11;
+    ctxt.r12 = v->arch.user_regs.r12;
+    ctxt.r13 = v->arch.user_regs.r13;
+    ctxt.r14 = v->arch.user_regs.r14;
+    ctxt.r15 = v->arch.user_regs.r15;
+    ctxt.dr0 = v->arch.debugreg[0];
+    ctxt.dr1 = v->arch.debugreg[1];
+    ctxt.dr2 = v->arch.debugreg[2];
+    ctxt.dr3 = v->arch.debugreg[3];
+    ctxt.dr6 = v->arch.debugreg[6];
+    ctxt.dr7 = v->arch.debugreg[7];
+
+    if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
+        return 1;
 
-        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
-            return 1;
-    }
     return 0;
 }
 
@@ -1176,43 +1155,29 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
+HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt_one, hvm_load_cpu_ctxt,
                           1, HVMSR_PER_VCPU);
 
 #define HVM_CPU_XSAVE_SIZE(xcr0) (offsetof(struct hvm_hw_cpu_xsave, \
                                            save_area) + \
                                   xstate_ctxt_size(xcr0))
 
-static void hvm_save_cpu_xsave_states_one(struct vcpu *v, struct hvm_hw_cpu_xsave *ctxt)
+static int hvm_save_cpu_xsave_states_one(struct vcpu *v, hvm_domain_context_t *h)
 {
+    unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
+    struct hvm_hw_cpu_xsave *ctxt;
+
+    if ( !xsave_enabled(v) )
+        return 2;
+    if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, size) )
+        return 1;
+    ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
+    h->cur += size;
     ctxt->xfeature_mask = xfeature_mask;
     ctxt->xcr0 = v->arch.xcr0;
     ctxt->xcr0_accum = v->arch.xcr0_accum;
-}
-
-static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
-    struct hvm_hw_cpu_xsave *ctxt;
-
-    if ( !cpu_has_xsave )
-        return 0;   /* do nothing */
-
-    for_each_vcpu ( d, v )
-    {
-        unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
-
-        if ( !xsave_enabled(v) )
-            continue;
-        if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, size) )
-            return 1;
-        ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
-        h->cur += size;
-
-        hvm_save_cpu_xsave_states_one(v, ctxt);
-        expand_xsave_states(v, &ctxt->save_area,
-                            size - offsetof(typeof(*ctxt), save_area));
-    }
+    expand_xsave_states(v, &ctxt->save_area,
+                        size - offsetof(typeof(*ctxt), save_area));
 
     return 0;
 }
@@ -1354,10 +1319,18 @@ static const uint32_t msrs_to_send[] = {
 };
 static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
 
-static int hvm_save_cpu_msrs_one(struct vcpu *v, struct hvm_msr *ctxt)
+static int hvm_save_cpu_msrs_one(struct vcpu *v, hvm_domain_context_t *h)
 {
+
+    struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
+    struct hvm_msr *ctxt;
     unsigned int i;
 
+    if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
+                         HVM_CPU_MSR_SIZE(msr_count_max)) )
+        return 1;
+    ctxt = (struct hvm_msr *)&h->data[h->cur];
+    ctxt->count = 0;
     for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
     {
         uint64_t val;
@@ -1383,45 +1356,23 @@ static int hvm_save_cpu_msrs_one(struct vcpu *v, struct hvm_msr *ctxt)
         ctxt->msr[ctxt->count].index = msrs_to_send[i];
         ctxt->msr[ctxt->count++].val = val;
     }
-    return 0;
-}
-
-static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
-
-    for_each_vcpu ( d, v )
-    {
-        struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
-        struct hvm_msr *ctxt;
-        unsigned int i;
-
-        if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
-                             HVM_CPU_MSR_SIZE(msr_count_max)) )
-            return 1;
-        ctxt = (struct hvm_msr *)&h->data[h->cur];
-        ctxt->count = 0;
+    if ( hvm_funcs.save_msr )
+        hvm_funcs.save_msr(v, ctxt);
 
-        hvm_save_cpu_msrs_one(v, ctxt);
+    ASSERT(ctxt->count <= msr_count_max);
 
-        if ( hvm_funcs.save_msr )
-            hvm_funcs.save_msr(v, ctxt);
-
-        ASSERT(ctxt->count <= msr_count_max);
-
-        for ( i = 0; i < ctxt->count; ++i )
-            ctxt->msr[i]._rsvd = 0;
+    for ( i = 0; i < ctxt->count; ++i )
+        ctxt->msr[i]._rsvd = 0;
 
-        if ( ctxt->count )
-        {
-            /* Rewrite length to indicate how much space we actually used. */
-            desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
-            h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
-        }
-        else
-            /* or rewind and remove the descriptor from the stream. */
-            h->cur -= sizeof(struct hvm_save_descriptor);
+    if ( ctxt->count )
+    {
+        /* Rewrite length to indicate how much space we actually used. */
+        desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
+        h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
     }
+    else
+        /* or rewind and remove the descriptor from the stream. */
+        h->cur -= sizeof(struct hvm_save_descriptor);
 
     return 0;
 }
@@ -1517,7 +1468,7 @@ static int __init hvm_register_CPU_save_and_restore(void)
 {
     hvm_register_savevm(CPU_XSAVE_CODE,
                         "CPU_XSAVE",
-                        hvm_save_cpu_xsave_states,
+                        hvm_save_cpu_xsave_states_one,
                         hvm_load_cpu_xsave_states,
                         HVM_CPU_XSAVE_SIZE(xfeature_mask) +
                             sizeof(struct hvm_save_descriptor),
@@ -1529,7 +1480,7 @@ static int __init hvm_register_CPU_save_and_restore(void)
     if ( msr_count_max )
         hvm_register_savevm(CPU_MSR_CODE,
                             "CPU_MSR",
-                            hvm_save_cpu_msrs,
+                            hvm_save_cpu_msrs_one,
                             hvm_load_cpu_msrs,
                             HVM_CPU_MSR_SIZE(msr_count_max) +
                                 sizeof(struct hvm_save_descriptor),
diff --git a/xen/arch/x86/hvm/i8254.c b/xen/arch/x86/hvm/i8254.c
index 992f08d..e0d2255 100644
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -390,8 +390,9 @@ void pit_stop_channel0_irq(PITState *pit)
     spin_unlock(&pit->lock);
 }
 
-static int pit_save(struct domain *d, hvm_domain_context_t *h)
+static int pit_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     PITState *pit = domain_vpit(d);
     int rc;
 
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index c85d004..72acb73 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -630,8 +630,9 @@ static int __init dump_irq_info_key_init(void)
 }
 __initcall(dump_irq_info_key_init);
 
-static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_pci(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     unsigned int asserted, pdev, pintx;
     int rc;
@@ -662,16 +663,18 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
     return rc;
 }
 
-static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_isa(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
 
     /* Save ISA IRQ lines */
     return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
 }
 
-static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
+static int irq_save_link(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
 
     /* Save PCI-ISA link state */
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index d311031..b0e0a60 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -666,46 +666,36 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
     return 0;
 }
 
-static void hvm_save_mtrr_msr_one(struct vcpu *v, struct hvm_hw_mtrr *hw_mtrr)
+static int hvm_save_mtrr_msr_one(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct mtrr_state *mtrr_state = &v->arch.hvm_vcpu.mtrr;
     int i;
+    struct mtrr_state *mtrr_state = &v->arch.hvm_vcpu.mtrr;
+    struct hvm_hw_mtrr hw_mtrr;
+
     /* save mtrr&pat */
-    hvm_get_guest_pat(v, &hw_mtrr->msr_pat_cr);
+    hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr);
 
-    hw_mtrr->msr_mtrr_def_type = mtrr_state->def_type
+    hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
                             | (mtrr_state->enabled << 10);
-    hw_mtrr->msr_mtrr_cap = mtrr_state->mtrr_cap;
+    hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;
 
     for ( i = 0; i < MTRR_VCNT; i++ )
     {
         /* save physbase */
-        hw_mtrr->msr_mtrr_var[i*2] =
+        hw_mtrr.msr_mtrr_var[i*2] =
             ((uint64_t*)mtrr_state->var_ranges)[i*2];
         /* save physmask */
-        hw_mtrr->msr_mtrr_var[i*2+1] =
+        hw_mtrr.msr_mtrr_var[i*2+1] =
             ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
     }
 
     for ( i = 0; i < NUM_FIXED_MSR; i++ )
-        hw_mtrr->msr_mtrr_fixed[i] =
+        hw_mtrr.msr_mtrr_fixed[i] =
             ((uint64_t*)mtrr_state->fixed_ranges)[i];
-}
-
-static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
-    struct hvm_hw_mtrr hw_mtrr;
-    /* save mtrr&pat */
-
-    for_each_vcpu(d, v)
-    {
-        hvm_save_mtrr_msr_one(v, &hw_mtrr);
+    if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
+        return 1;
 
-        if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
-            return 1;
-    }
     return 0;
 }
 
@@ -751,7 +741,7 @@ static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, hvm_load_mtrr_msr,
+HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr_one, hvm_load_mtrr_msr,
                           1, HVMSR_PER_VCPU);
 
 void memory_type_changed(struct domain *d)
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 435647f..d8dcbc2 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -249,8 +249,9 @@ static int handle_pmt_io(
     return X86EMUL_OKAY;
 }
 
-static int acpi_save(struct domain *d, hvm_domain_context_t *h)
+static int acpi_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
     PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
     uint32_t x, msb = acpi->tmr_val & TMR_VAL_MSB;
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index cb75b99..58b70fc 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -737,8 +737,9 @@ void rtc_migrate_timers(struct vcpu *v)
 }
 
 /* Save RTC hardware state */
-static int rtc_save(struct domain *d, hvm_domain_context_t *h)
+static int rtc_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     RTCState *s = domain_vrtc(d);
     int rc;
 
diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c
index 8984a23..fe24f08 100644
--- a/xen/arch/x86/hvm/save.c
+++ b/xen/arch/x86/hvm/save.c
@@ -135,9 +135,12 @@ size_t hvm_save_size(struct domain *d)
 int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
                  XEN_GUEST_HANDLE_64(uint8) handle, uint64_t *bufsz)
 {
-    int rv;
+    int rv = 0;
     hvm_domain_context_t ctxt = { };
     const struct hvm_save_descriptor *desc;
+    bool is_single_instance = false;
+    uint32_t off = 0;
+    struct vcpu *v;
 
     if ( d->is_dying ||
          typecode > HVM_SAVE_CODE_MAX ||
@@ -145,43 +148,89 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
          !hvm_sr_handlers[typecode].save )
         return -EINVAL;
 
+    if( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
+        instance < d->max_vcpus )
+        is_single_instance = true;
+
     ctxt.size = hvm_sr_handlers[typecode].size;
-    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU )
+    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
+        instance == d->max_vcpus )
         ctxt.size *= d->max_vcpus;
     ctxt.data = xmalloc_bytes(ctxt.size);
     if ( !ctxt.data )
         return -ENOMEM;
 
-    if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt)) != 0 )
-        printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
-               d->domain_id, typecode, rv);
-    else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) )
-    {
-        uint32_t off;
-
-        for ( off = 0; off <= (ctxt.cur - sizeof(*desc)); off += desc->length )
+    if( is_single_instance )
+        vcpu_pause(d->vcpu[instance]);
+    else
+        domain_pause(d);
+
+    if( is_single_instance )
+    {
+        if ( (rv = hvm_sr_handlers[typecode].save(d->vcpu[instance],
+                                                  &ctxt)) != 0 )
         {
-            desc = (void *)(ctxt.data + off);
-            /* Move past header */
-            off += sizeof(*desc);
-            if ( ctxt.cur < desc->length ||
-                 off > ctxt.cur - desc->length )
-                break;
-            if ( instance == desc->instance )
-            {
-                rv = 0;
-                if ( guest_handle_is_null(handle) )
-                    *bufsz = desc->length;
-                else if ( *bufsz < desc->length )
-                    rv = -ENOBUFS;
-                else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
-                    rv = -EFAULT;
-                else
-                    *bufsz = desc->length;
-                break;
-            }
+            printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
+                   d->domain_id, typecode, rv);
+            vcpu_unpause(d->vcpu[instance]);
+        }
+        else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) )
+        {
+            desc = (void *)(ctxt.data);
+            /* Move past header */
+            off = sizeof(*desc);
+            if ( ctxt.cur < desc->length ||
+                 off > ctxt.cur - desc->length )
+                rv = -EFAULT;
+            rv = 0;
+            if ( guest_handle_is_null(handle) )
+                *bufsz = desc->length;
+            else if ( *bufsz < desc->length )
+                rv = -ENOBUFS;
+            else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
+                rv = -EFAULT;
+            else
+                *bufsz = desc->length;
+            vcpu_unpause(d->vcpu[instance]);
        }
     }
+    else
+    {
+        for_each_vcpu ( d, v )
+        {
+            if ( (rv = hvm_sr_handlers[typecode].save(d->vcpu[instance],
+                                                      &ctxt)) != 0 )
+            {
+                printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
+                       d->domain_id, typecode, rv);
+            }
+            else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) )
+            {
+                desc = (void *)(ctxt.data + off);
+                /* Move past header */
+                off += sizeof(*desc);
+                if ( ctxt.cur < desc->length ||
+                     off > ctxt.cur - desc->length )
+                    break;
+                if ( instance == desc->instance )
+                {
+                    rv = 0;
+                    if ( guest_handle_is_null(handle) )
+                        *bufsz = desc->length;
+                    else if ( *bufsz < desc->length )
+                        rv = -ENOBUFS;
+                    else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
+                        rv = -EFAULT;
+                    else
+                        *bufsz = desc->length;
+                    break;
+                }
+                off += desc->length;
+            }
+        }
+        domain_unpause(d);
+    }
 
     xfree(ctxt.data);
     return rv;
@@ -193,7 +242,8 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
     struct hvm_save_header hdr;
     struct hvm_save_end end;
     hvm_save_handler handler;
-    unsigned int i;
+    unsigned int i, rc;
+    struct vcpu *v = NULL;
 
     if ( d->is_dying )
         return -EINVAL;
@@ -225,12 +275,19 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
         {
             printk(XENLOG_G_INFO "HVM%d save: %s\n",
                    d->domain_id, hvm_sr_handlers[i].name);
-            if ( handler(d, h) != 0 )
+            for_each_vcpu ( d, v )
             {
-                printk(XENLOG_G_ERR
-                       "HVM%d save: failed to save type %"PRIu16"\n",
-                       d->domain_id, i);
-                return -EFAULT;
+                rc = handler(v, h);
+                if( rc == 2 )
+                    continue;
+
+                if( rc != 0 )
+                {
+                    printk(XENLOG_G_ERR
+                           "HVM%d save: failed to save type %"PRIu16"\n",
+                           d->domain_id, i);
+                    return -EFAULT;
+                }
             }
         }
     }
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 97b419f..86d02cf 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -569,8 +569,9 @@ int vioapic_get_trigger_mode(const struct domain *d, unsigned int gsi)
     return vioapic->redirtbl[pin].fields.trig_mode;
 }
 
-static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
+static int ioapic_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_vioapic *s;
 
     if ( !has_vioapic(d) )
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index d22c8ac..8a7c592 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -990,8 +990,9 @@ out:
     return HVM_HCALL_completed;
 }
 
-static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
+static int viridian_save_domain_ctxt(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_viridian_domain_context ctxt = {
         .time_ref_count = d->arch.hvm_domain.viridian.time_ref_count.val,
         .hypercall_gpa = d->arch.hvm_domain.viridian.hypercall_gpa.raw,
@@ -1026,28 +1027,19 @@ static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
                           viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);
 
-static void viridian_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_viridian_vcpu_context *ctxt)
+static int viridian_save_vcpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
 {
-    ctxt->vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw;
-    ctxt->vp_assist_pending = v->arch.hvm_vcpu.viridian.vp_assist.pending;
-    *ctxt->_pad = 0;
-}
-
-static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
-{
-    struct vcpu *v;
+    struct domain *d = v->domain;
+    struct hvm_viridian_vcpu_context ctxt = {
+        .vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw,
+        .vp_assist_pending = v->arch.hvm_vcpu.viridian.vp_assist.pending,
+    };
 
     if ( !is_viridian_domain(d) )
         return 0;
 
-    for_each_vcpu( d, v ) {
-        struct hvm_viridian_vcpu_context ctxt;
-
-        viridian_save_vcpu_ctxt_one(v, &ctxt);
-
-        if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
-            return 1;
-    }
+    if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
+        return 1;
 
     return 0;
 }
@@ -1082,7 +1074,7 @@ static int viridian_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_VCPU, viridian_save_vcpu_ctxt,
+HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_VCPU, viridian_save_vcpu_ctxt_one,
                           viridian_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
 
 static int __init parse_viridian_version(const char *arg)
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 1b9f00a..ab35400 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1435,43 +1435,35 @@ static void lapic_rearm(struct vlapic *s)
     s->timer_last_update = s->pt.last_plt_gtime;
 }
 
-static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
+static int lapic_save_hidden(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v;
+    struct domain *d = v->domain;
     struct vlapic *s;
     int rc = 0;
 
     if ( !has_vlapic(d) )
         return 0;
 
-    for_each_vcpu ( d, v )
-    {
-        s = vcpu_vlapic(v);
-        if ( (rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw)) != 0 )
-            break;
-    }
+    s = vcpu_vlapic(v);
+    rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw);
 
     return rc;
 }
 
-static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
+static int lapic_save_regs(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v;
+    struct domain *d = v->domain;
     struct vlapic *s;
    int rc = 0;
 
     if ( !has_vlapic(d) )
         return 0;
 
-    for_each_vcpu ( d, v )
-    {
-        if ( hvm_funcs.sync_pir_to_irr )
-            hvm_funcs.sync_pir_to_irr(v);
+    if ( hvm_funcs.sync_pir_to_irr )
+        hvm_funcs.sync_pir_to_irr(v);
 
-        s = vcpu_vlapic(v);
-        if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) != 0 )
-            break;
-    }
+    s = vcpu_vlapic(v);
+    rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs);
 
     return rc;
 }
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index e160bbd..bad5066 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -371,8 +371,9 @@ static int vpic_intercept_elcr_io(
     return X86EMUL_OKAY;
 }
 
-static int vpic_save(struct domain *d, hvm_domain_context_t *h)
+static int vpic_save(struct vcpu *v, hvm_domain_context_t *h)
 {
+    struct domain *d = v->domain;
     struct hvm_hw_vpic *s;
     int i;
 
diff --git a/xen/include/asm-x86/hvm/save.h b/xen/include/asm-x86/hvm/save.h
index f889e8f..fe642ab 100644
--- a/xen/include/asm-x86/hvm/save.h
+++ b/xen/include/asm-x86/hvm/save.h
@@ -95,7 +95,7 @@ static inline uint16_t hvm_load_instance(struct hvm_domain_context *h)
  * The save handler may save multiple instances of a type into the buffer;
  * the load handler will be called once for each instance found when
  * restoring.  Both return non-zero on error. */
-typedef int (*hvm_save_handler) (struct domain *d,
+typedef int (*hvm_save_handler) (struct vcpu *v,
                                  hvm_domain_context_t *h);
 typedef int (*hvm_load_handler) (struct domain *d,
                                  hvm_domain_context_t *h);
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
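For readers skimming the archive, the shape of the conversion can be sketched outside of Xen. The following is a minimal, self-contained C illustration of the pattern this patch moves to -- save handlers that take a single vcpu and a caller that owns the for loop -- under the assumption of simplified stand-in types: struct vcpu, struct domain, ctx_buf and the helper names here are mock-ups for illustration only, not the real Xen definitions or APIs.

/*
 * Illustrative sketch only -- not part of the patch.  The types below are
 * simplified stand-ins, not the Xen ones.
 */
#include <stdio.h>
#include <string.h>

#define MAX_VCPUS 4

struct vcpu {
    int vcpu_id;
    int online;               /* stand-in for !(pause_flags & VPF_down) */
    unsigned long tsc_adjust;
};

struct domain {
    struct vcpu vcpu[MAX_VCPUS];
    unsigned int max_vcpus;
};

struct ctx_buf {
    char data[256];
    size_t cur;
};

/* Per-vcpu handler: saves state for exactly one vcpu (like *_save_one()). */
typedef int (*save_handler)(struct vcpu *v, struct ctx_buf *h);

static int save_tsc_adjust_one(struct vcpu *v, struct ctx_buf *h)
{
    if ( !v->online )
        return 2;                       /* nothing to save: caller skips */
    if ( h->cur + sizeof(v->tsc_adjust) > sizeof(h->data) )
        return 1;                       /* error */
    memcpy(h->data + h->cur, &v->tsc_adjust, sizeof(v->tsc_adjust));
    h->cur += sizeof(v->tsc_adjust);
    return 0;
}

/* Whole-domain save: the for loop now lives in the caller. */
static int save_all(struct domain *d, save_handler handler, struct ctx_buf *h)
{
    unsigned int i;

    for ( i = 0; i < d->max_vcpus; i++ )
    {
        int rc = handler(&d->vcpu[i], h);

        if ( rc == 2 )
            continue;                   /* vcpu skipped, not an error */
        if ( rc != 0 )
            return rc;
    }
    return 0;
}

/* Single-vcpu query: only the targeted vcpu would need pausing. */
static int save_one(struct domain *d, unsigned int instance,
                    save_handler handler, struct ctx_buf *h)
{
    if ( instance >= d->max_vcpus )
        return -1;
    /* vcpu_pause()/vcpu_unpause() would bracket this call instead of
     * domain_pause()/domain_unpause(). */
    return handler(&d->vcpu[instance], h);
}

int main(void)
{
    struct domain d = { .max_vcpus = 2 };
    struct ctx_buf h = { .cur = 0 };
    int rc;

    d.vcpu[0] = (struct vcpu){ .vcpu_id = 0, .online = 1, .tsc_adjust = 42 };
    d.vcpu[1] = (struct vcpu){ .vcpu_id = 1, .online = 0 };

    rc = save_all(&d, save_tsc_adjust_one, &h);
    printf("save_all: rc=%d, bytes saved=%zu\n", rc, h.cur);
    printf("save_one(vcpu 0): rc=%d\n", save_one(&d, 0, save_tsc_adjust_one, &h));
    return 0;
}

The sentinel return value 2 marks "nothing to save for this vcpu", so the whole-domain loop can skip offline vcpus without treating the skip as a failure -- the same convention hvm_save() adopts in the patch -- while the single-instance path only needs to pause the one targeted vcpu rather than the whole domain.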