Re: [Xen-devel] [PATCH v14 03/11] x86/hvm: Introduce hvm_save_cpu_ctxt_one func
>>> On 25.07.18 at 14:14, <aisaila@xxxxxxxxxxxxxxx> wrote:
> This is used to save the CPU context of a single vcpu.
>
> Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
>
> ---
> Changes since V12:
> - Changed memset to {} init.
> ---
> xen/arch/x86/hvm/hvm.c | 214 +++++++++++++++++++++++++------------------------
> 1 file changed, 111 insertions(+), 103 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index d90da9a..720204c 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -787,119 +787,127 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
> HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
> hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
>
> +static int hvm_save_cpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
> +{
> + struct segment_register seg;
> + struct hvm_hw_cpu ctxt = {};
I think you want to move into this initializer anything that can go
there and that does not conflict with ...
> + /* Architecture-specific vmcs/vmcb bits */
> + hvm_funcs.save_cpu_ctxt(v, &ctxt);
... this call. No need to write all these fields twice.
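As a rough sketch of what that might look like (illustrative only, not
tested; field names are taken from the quoted code, with v->domain standing
in for the former d parameter):

    static int hvm_save_cpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
    {
        struct segment_register seg;
        struct hvm_hw_cpu ctxt = {
            /*
             * Assuming hvm_funcs.save_cpu_ctxt() does not touch these
             * fields; anything it does write would need to be assigned
             * after the call instead.
             */
            .tsc = hvm_get_guest_tsc_fixed(v,
                       v->domain->arch.hvm_domain.sync_tsc),
            .msr_tsc_aux = hvm_msr_tsc_aux(v),
            .rax = v->arch.user_regs.rax,
            .rbx = v->arch.user_regs.rbx,
            /* ... rcx through r15, rsp, rip, rflags likewise ... */
            .dr0 = v->arch.debugreg[0],
            /* ... dr1 through dr3, dr6, dr7 likewise ... */
        };

        /* Architecture-specific vmcs/vmcb bits */
        hvm_funcs.save_cpu_ctxt(v, &ctxt);

        /*
         * Segment registers go through hvm_get_segment_register(), and the
         * FPU state is conditional, so those assignments stay below the
         * call, exactly as in the quoted code.
         */
        hvm_get_segment_register(v, x86_seg_idtr, &seg);
        ctxt.idtr_limit = seg.limit;
        ctxt.idtr_base = seg.base;
        /* ... remaining segments and FPU handling as quoted above ... */

        return hvm_save_entry(CPU, v->vcpu_id, h, &ctxt);
    }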
> static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
> {
> struct vcpu *v;
> - struct hvm_hw_cpu ctxt;
> - struct segment_register seg;
> + int err = 0;
>
> for_each_vcpu ( d, v )
> {
> - /* We don't need to save state for a vcpu that is down; the restore
> - * code will leave it down if there is nothing saved. */
> + /*
> + * We don't need to save state for a vcpu that is down; the restore
> + * code will leave it down if there is nothing saved.
> + */
> if ( v->pause_flags & VPF_down )
> continue;
>
> - memset(&ctxt, 0, sizeof(ctxt));
> -
> - /* Architecture-specific vmcs/vmcb bits */
> - hvm_funcs.save_cpu_ctxt(v, &ctxt);
> -
> - ctxt.tsc = hvm_get_guest_tsc_fixed(v, d->arch.hvm_domain.sync_tsc);
> -
> - ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
> -
> - hvm_get_segment_register(v, x86_seg_idtr, &seg);
> - ctxt.idtr_limit = seg.limit;
> - ctxt.idtr_base = seg.base;
> -
> - hvm_get_segment_register(v, x86_seg_gdtr, &seg);
> - ctxt.gdtr_limit = seg.limit;
> - ctxt.gdtr_base = seg.base;
> -
> - hvm_get_segment_register(v, x86_seg_cs, &seg);
> - ctxt.cs_sel = seg.sel;
> - ctxt.cs_limit = seg.limit;
> - ctxt.cs_base = seg.base;
> - ctxt.cs_arbytes = seg.attr;
> -
> - hvm_get_segment_register(v, x86_seg_ds, &seg);
> - ctxt.ds_sel = seg.sel;
> - ctxt.ds_limit = seg.limit;
> - ctxt.ds_base = seg.base;
> - ctxt.ds_arbytes = seg.attr;
> -
> - hvm_get_segment_register(v, x86_seg_es, &seg);
> - ctxt.es_sel = seg.sel;
> - ctxt.es_limit = seg.limit;
> - ctxt.es_base = seg.base;
> - ctxt.es_arbytes = seg.attr;
> -
> - hvm_get_segment_register(v, x86_seg_ss, &seg);
> - ctxt.ss_sel = seg.sel;
> - ctxt.ss_limit = seg.limit;
> - ctxt.ss_base = seg.base;
> - ctxt.ss_arbytes = seg.attr;
> -
> - hvm_get_segment_register(v, x86_seg_fs, &seg);
> - ctxt.fs_sel = seg.sel;
> - ctxt.fs_limit = seg.limit;
> - ctxt.fs_base = seg.base;
> - ctxt.fs_arbytes = seg.attr;
> -
> - hvm_get_segment_register(v, x86_seg_gs, &seg);
> - ctxt.gs_sel = seg.sel;
> - ctxt.gs_limit = seg.limit;
> - ctxt.gs_base = seg.base;
> - ctxt.gs_arbytes = seg.attr;
> -
> - hvm_get_segment_register(v, x86_seg_tr, &seg);
> - ctxt.tr_sel = seg.sel;
> - ctxt.tr_limit = seg.limit;
> - ctxt.tr_base = seg.base;
> - ctxt.tr_arbytes = seg.attr;
> -
> - hvm_get_segment_register(v, x86_seg_ldtr, &seg);
> - ctxt.ldtr_sel = seg.sel;
> - ctxt.ldtr_limit = seg.limit;
> - ctxt.ldtr_base = seg.base;
> - ctxt.ldtr_arbytes = seg.attr;
> -
> - if ( v->fpu_initialised )
> - {
> - memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
> - ctxt.flags = XEN_X86_FPU_INITIALISED;
> - }
> -
> - ctxt.rax = v->arch.user_regs.rax;
> - ctxt.rbx = v->arch.user_regs.rbx;
> - ctxt.rcx = v->arch.user_regs.rcx;
> - ctxt.rdx = v->arch.user_regs.rdx;
> - ctxt.rbp = v->arch.user_regs.rbp;
> - ctxt.rsi = v->arch.user_regs.rsi;
> - ctxt.rdi = v->arch.user_regs.rdi;
> - ctxt.rsp = v->arch.user_regs.rsp;
> - ctxt.rip = v->arch.user_regs.rip;
> - ctxt.rflags = v->arch.user_regs.rflags;
> - ctxt.r8 = v->arch.user_regs.r8;
> - ctxt.r9 = v->arch.user_regs.r9;
> - ctxt.r10 = v->arch.user_regs.r10;
> - ctxt.r11 = v->arch.user_regs.r11;
> - ctxt.r12 = v->arch.user_regs.r12;
> - ctxt.r13 = v->arch.user_regs.r13;
> - ctxt.r14 = v->arch.user_regs.r14;
> - ctxt.r15 = v->arch.user_regs.r15;
> - ctxt.dr0 = v->arch.debugreg[0];
> - ctxt.dr1 = v->arch.debugreg[1];
> - ctxt.dr2 = v->arch.debugreg[2];
> - ctxt.dr3 = v->arch.debugreg[3];
> - ctxt.dr6 = v->arch.debugreg[6];
> - ctxt.dr7 = v->arch.debugreg[7];
> -
> - if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
> - return 1;
> + err = hvm_save_cpu_ctxt_one(v, h);
> + if ( err )
> + break;
> }
> - return 0;
> + return err;
> }
Please take the opportunity and add the missing blank line ahead of
this main return statement of the function.
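I.e. the tail of the function would read:

            err = hvm_save_cpu_ctxt_one(v, h);
            if ( err )
                break;
        }

        return err;
    }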
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel