[Xen-devel] [PATCH v17 05/13] x86/hvm: Introduce hvm_save_cpu_msrs_one func
This is used to save data from a single instance.

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>

---
Changes since V14:
	- Remove err init
	- Add blank line ahead of return.
---
 xen/arch/x86/hvm/hvm.c | 106 +++++++++++++++++++++++++++----------------------
 1 file changed, 59 insertions(+), 47 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5b0820e..7df8744 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1364,69 +1364,81 @@ static const uint32_t msrs_to_send[] = {
 };
 static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
 
-static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_msrs_one(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v;
+    struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
+    struct hvm_msr *ctxt;
+    unsigned int i;
+    int err;
 
-    for_each_vcpu ( d, v )
+    err = _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
+                          HVM_CPU_MSR_SIZE(msr_count_max));
+    if ( err )
+        return err;
+    ctxt = (struct hvm_msr *)&h->data[h->cur];
+    ctxt->count = 0;
+
+    for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
     {
-        struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
-        struct hvm_msr *ctxt;
-        unsigned int i;
+        uint64_t val;
+        int rc = guest_rdmsr(v, msrs_to_send[i], &val);
 
-        if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
-                             HVM_CPU_MSR_SIZE(msr_count_max)) )
-            return 1;
-        ctxt = (struct hvm_msr *)&h->data[h->cur];
-        ctxt->count = 0;
+        /*
+         * It is the programmers responsibility to ensure that
+         * msrs_to_send[] contain generally-read/write MSRs.
+         * X86EMUL_EXCEPTION here implies a missing feature, and that the
+         * guest doesn't have access to the MSR.
+         */
+        if ( rc == X86EMUL_EXCEPTION )
+            continue;
 
-        for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
+        if ( rc != X86EMUL_OKAY )
         {
-            uint64_t val;
-            int rc = guest_rdmsr(v, msrs_to_send[i], &val);
+            ASSERT_UNREACHABLE();
+            return -ENXIO;
+        }
 
-            /*
-             * It is the programmers responsibility to ensure that
-             * msrs_to_send[] contain generally-read/write MSRs.
-             * X86EMUL_EXCEPTION here implies a missing feature, and that the
-             * guest doesn't have access to the MSR.
-             */
-            if ( rc == X86EMUL_EXCEPTION )
-                continue;
+        if ( !val )
+            continue; /* Skip empty MSRs. */
 
-            if ( rc != X86EMUL_OKAY )
-            {
-                ASSERT_UNREACHABLE();
-                return -ENXIO;
-            }
+        ctxt->msr[ctxt->count].index = msrs_to_send[i];
+        ctxt->msr[ctxt->count++].val = val;
+    }
 
-            if ( !val )
-                continue; /* Skip empty MSRs. */
+    if ( hvm_funcs.save_msr )
+        hvm_funcs.save_msr(v, ctxt);
 
-            ctxt->msr[ctxt->count].index = msrs_to_send[i];
-            ctxt->msr[ctxt->count++].val = val;
-        }
+    ASSERT(ctxt->count <= msr_count_max);
 
-        if ( hvm_funcs.save_msr )
-            hvm_funcs.save_msr(v, ctxt);
+    for ( i = 0; i < ctxt->count; ++i )
+        ctxt->msr[i]._rsvd = 0;
 
-        ASSERT(ctxt->count <= msr_count_max);
+    if ( ctxt->count )
+    {
+        /* Rewrite length to indicate how much space we actually used. */
+        desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
+        h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
+    }
+    else
+        /* or rewind and remove the descriptor from the stream. */
+        h->cur -= sizeof(struct hvm_save_descriptor);
 
-        for ( i = 0; i < ctxt->count; ++i )
-            ctxt->msr[i]._rsvd = 0;
+    return 0;
+}
 
-        if ( ctxt->count )
-        {
-            /* Rewrite length to indicate how much space we actually used. */
-            desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
-            h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
-        }
-        else
-            /* or rewind and remove the descriptor from the stream. */
-            h->cur -= sizeof(struct hvm_save_descriptor);
+static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
+{
+    struct vcpu *v;
+    int err = 0;
+
+    for_each_vcpu ( d, v )
+    {
+        err = hvm_save_cpu_msrs_one(v, h);
+        if ( err )
+            break;
     }
 
-    return 0;
+    return err;
 }
 
 static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
-- 
2.7.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
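The subtle part of the new helper is its tail: the record is reserved at the
maximum size HVM_CPU_MSR_SIZE(msr_count_max), and only after the MSRs have been
read is the descriptor either shrunk to the space actually used or rewound out
of the stream. The standalone sketch below models that reserve/shrink/rewind
pattern in plain C. It is an illustration only: the types and names here
(struct save_stream, struct msr_entry, save_msrs_one, the 0x14 typecode) are
stand-ins invented for this note, not Xen's own definitions.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Per-record header in the stream (shape loosely mirrors a save descriptor). */
struct descriptor {
    uint16_t typecode;
    uint16_t instance;
    uint32_t length;      /* payload length, excluding this header */
};

struct msr_entry {
    uint32_t index;
    uint64_t val;
};

/* Flat byte buffer plus a write cursor, standing in for the save context. */
struct save_stream {
    uint8_t data[4096];
    size_t cur;
};

#define MSR_PAYLOAD_SIZE(n) ((uint32_t)((n) * sizeof(struct msr_entry)))

/* Emit one record for "vcpu" `instance`, given its candidate MSR values. */
static int save_msrs_one(struct save_stream *h, uint16_t instance,
                         const struct msr_entry *candidates, size_t n_candidates)
{
    struct descriptor *desc = (struct descriptor *)&h->data[h->cur];
    struct msr_entry *out;
    size_t count = 0, i;

    /* Reserve a header plus worst-case payload before the real size is known. */
    if ( h->cur + sizeof(*desc) + MSR_PAYLOAD_SIZE(n_candidates) > sizeof(h->data) )
        return -1;

    desc->typecode = 0x14;            /* illustrative typecode only */
    desc->instance = instance;
    desc->length = MSR_PAYLOAD_SIZE(n_candidates);
    h->cur += sizeof(*desc);
    out = (struct msr_entry *)&h->data[h->cur];

    for ( i = 0; i < n_candidates; i++ )
    {
        if ( !candidates[i].val )
            continue;                 /* skip empty MSRs, as the patch does */
        out[count++] = candidates[i];
    }

    if ( count )
    {
        /* Rewrite the length to reflect the space actually used. */
        desc->length = MSR_PAYLOAD_SIZE(count);
        h->cur += MSR_PAYLOAD_SIZE(count);
    }
    else
        /* ... or rewind and drop the descriptor from the stream entirely. */
        h->cur -= sizeof(*desc);

    return 0;
}

int main(void)
{
    struct save_stream h = { .cur = 0 };
    const struct msr_entry vcpu0[] = { { 0x10, 123 }, { 0x1b, 0 }, { 0xc0000080, 0xd01 } };
    const struct msr_entry vcpu1[] = { { 0x10, 0 }, { 0x1b, 0 } };
    size_t before;

    /* vcpu0 has two non-zero MSRs: a shrunken record is emitted. */
    assert(save_msrs_one(&h, 0, vcpu0, 3) == 0);

    /* vcpu1 has none: its descriptor is rewound, leaving the stream untouched. */
    before = h.cur;
    assert(save_msrs_one(&h, 1, vcpu1, 2) == 0);
    assert(h.cur == before);

    printf("stream uses %zu bytes\n", h.cur);
    return 0;
}

The rewind case is what keeps a vCPU with no interesting MSRs from adding an
empty CPU_MSR record to the migration stream, matching the behaviour of the
original per-vCPU loop body that the patch splits out.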