
Re: [Xen-devel] [PATCH v10 05/11] x86/hvm: Introduce hvm_save_cpu_msrs_one func



> -----Original Message-----
> From: Alexandru Isaila [mailto:aisaila@xxxxxxxxxxxxxxx]
> Sent: 04 July 2018 14:32
> To: xen-devel@xxxxxxxxxxxxx
> Cc: Ian Jackson <Ian.Jackson@xxxxxxxxxx>; Wei Liu <wei.liu2@xxxxxxxxxx>;
> jbeulich@xxxxxxxx; Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul
> Durrant <Paul.Durrant@xxxxxxxxxx>; Alexandru Isaila
> <aisaila@xxxxxxxxxxxxxxx>
> Subject: [PATCH v10 05/11] x86/hvm: Introduce hvm_save_cpu_msrs_one
> func
> 
> This is used to save data from a single vCPU instance.
> 
> Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>

> 
> ---
> Changes since V7:
>       - Moved the init of ctxt->count to hvm_save_cpu_msrs_one()
> ---
>  xen/arch/x86/hvm/hvm.c | 101 +++++++++++++++++++++++++++----------------------
>  1 file changed, 55 insertions(+), 46 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 495abe5..9ff9954 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -1362,66 +1362,75 @@ static const uint32_t msrs_to_send[] = {
>  };
>  static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
> 
> -static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
> +static int hvm_save_cpu_msrs_one(struct vcpu *v, hvm_domain_context_t *h)
>  {
> -    struct vcpu *v;
> +    unsigned int i;
> +    struct hvm_msr *ctxt;
> +    struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
> 
> -    for_each_vcpu ( d, v )
> +    if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
> +                         HVM_CPU_MSR_SIZE(msr_count_max)) )
> +        return 1;
> +    ctxt = (struct hvm_msr *)&h->data[h->cur];
> +
> +    ctxt->count = 0;
> +    for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
>      {
> -        struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
> -        struct hvm_msr *ctxt;
> -        unsigned int i;
> +        uint64_t val;
> +        int rc = guest_rdmsr(v, msrs_to_send[i], &val);
> 
> -        if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
> -                             HVM_CPU_MSR_SIZE(msr_count_max)) )
> -            return 1;
> -        ctxt = (struct hvm_msr *)&h->data[h->cur];
> -        ctxt->count = 0;
> +        /*
> +         * It is the programmers responsibility to ensure that
> +         * msrs_to_send[] contain generally-read/write MSRs.
> +         * X86EMUL_EXCEPTION here implies a missing feature, and that the
> +         * guest doesn't have access to the MSR.
> +         */
> +        if ( rc == X86EMUL_EXCEPTION )
> +            continue;
> 
> -        for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
> +        if ( rc != X86EMUL_OKAY )
>          {
> -            uint64_t val;
> -            int rc = guest_rdmsr(v, msrs_to_send[i], &val);
> +            ASSERT_UNREACHABLE();
> +            return -ENXIO;
> +        }
> 
> -            /*
> -             * It is the programmers responsibility to ensure that
> -             * msrs_to_send[] contain generally-read/write MSRs.
> -             * X86EMUL_EXCEPTION here implies a missing feature, and that the
> -             * guest doesn't have access to the MSR.
> -             */
> -            if ( rc == X86EMUL_EXCEPTION )
> -                continue;
> +        if ( !val )
> +            continue; /* Skip empty MSRs. */
> 
> -            if ( rc != X86EMUL_OKAY )
> -            {
> -                ASSERT_UNREACHABLE();
> -                return -ENXIO;
> -            }
> +        ctxt->msr[ctxt->count].index = msrs_to_send[i];
> +        ctxt->msr[ctxt->count++].val = val;
> +    }
> 
> -            if ( !val )
> -                continue; /* Skip empty MSRs. */
> +    if ( hvm_funcs.save_msr )
> +        hvm_funcs.save_msr(v, ctxt);
> 
> -            ctxt->msr[ctxt->count].index = msrs_to_send[i];
> -            ctxt->msr[ctxt->count++].val = val;
> -        }
> +    ASSERT(ctxt->count <= msr_count_max);
> 
> -        if ( hvm_funcs.save_msr )
> -            hvm_funcs.save_msr(v, ctxt);
> +    for ( i = 0; i < ctxt->count; ++i )
> +        ctxt->msr[i]._rsvd = 0;
> 
> -        ASSERT(ctxt->count <= msr_count_max);
> +    if ( ctxt->count )
> +    {
> +        /* Rewrite length to indicate how much space we actually used. */
> +        desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
> +        h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
> +    }
> +    else
> +        /* or rewind and remove the descriptor from the stream. */
> +        h->cur -= sizeof(struct hvm_save_descriptor);
> 
> -        for ( i = 0; i < ctxt->count; ++i )
> -            ctxt->msr[i]._rsvd = 0;
> +    return 0;
> +}
> 
> -        if ( ctxt->count )
> -        {
> -            /* Rewrite length to indicate how much space we actually used. */
> -            desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
> -            h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
> -        }
> -        else
> -            /* or rewind and remove the descriptor from the stream. */
> -            h->cur -= sizeof(struct hvm_save_descriptor);
> +
> +static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
> +{
> +    struct vcpu *v;
> +
> +    for_each_vcpu ( d, v )
> +    {
> +        if ( hvm_save_cpu_msrs_one(v, h) != 0 )
> +            return 1;
>      }
> 
>      return 0;
> --
> 2.7.4
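
For readers skimming the thread, the shape of the refactor is: the per-domain save handler now only iterates the domain's vCPUs and delegates to the new per-vCPU helper, which reserves a worst-case-sized record, fills in only the non-zero MSRs, and then either shrinks the descriptor's length or rewinds it out of the stream entirely. A condensed sketch of that flow (not the literal patch; types and helpers as declared in xen/arch/x86/hvm/hvm.c, with the MSR-collection loop elided):

    /* Per-vCPU helper: reserve space, record MSRs, then fix up the descriptor. */
    static int hvm_save_cpu_msrs_one(struct vcpu *v, hvm_domain_context_t *h)
    {
        struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
        struct hvm_msr *ctxt;

        /* Reserve space for the worst case: every MSR in msrs_to_send[]. */
        if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
                             HVM_CPU_MSR_SIZE(msr_count_max)) )
            return 1;
        ctxt = (struct hvm_msr *)&h->data[h->cur];
        ctxt->count = 0;

        /* ... read msrs_to_send[] via guest_rdmsr() and record non-zero values ... */

        if ( ctxt->count )
        {
            /* Shrink the record to the space actually used. */
            desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
            h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
        }
        else
            /* Nothing recorded: rewind and drop the descriptor from the stream. */
            h->cur -= sizeof(struct hvm_save_descriptor);

        return 0;
    }

    /* Per-domain handler reduced to a loop over vCPUs. */
    static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
    {
        struct vcpu *v;

        for_each_vcpu ( d, v )
            if ( hvm_save_cpu_msrs_one(v, h) != 0 )
                return 1;

        return 0;
    }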

