
[Xen-devel] [PATCH v4 1/2] x86/hvm: Introduce *save_one() functions


  • To: xen-devel@xxxxxxxxxxxxx
  • From: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Date: Mon, 7 May 2018 11:24:00 +0300
  • Cc: wei.liu2@xxxxxxxxxx, andrew.cooper3@xxxxxxxxxx, ian.jackson@xxxxxxxxxxxxx, paul.durrant@xxxxxxxxxx, jbeulich@xxxxxxxx, Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Delivery-date: Mon, 07 May 2018 08:24:22 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

This patch introduces the *save_one() functions. They are called from the
corresponding *save() functions so that the data for a single instance can
be extracted.
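
As an illustration (a simplified sketch of the vmce case from the diff
below, not literal new code): each *save_one() fills the save record for
exactly one vcpu, and the corresponding *save() reduces to a loop that
calls it and emits the entry:

    /* Sketch: mirrors the vmce changes in the diff below. */
    void vmce_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_vmce_vcpu *ctxt)
    {
        /* Extract the state of a single vcpu into its save record. */
        ctxt->caps = v->arch.vmce.mcg_cap;
        ctxt->mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
        ctxt->mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
        ctxt->mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
    }

    static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
    {
        struct vcpu *v;
        int err = 0;

        for_each_vcpu ( d, v )
        {
            struct hvm_vmce_vcpu ctxt;

            vmce_save_vcpu_ctxt_one(v, &ctxt); /* per-instance extraction */
            err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
            if ( err )
                break;
        }

        return err;
    }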

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>

---
Changes since V3:
        - Rebased on the latest staging version.
        - Split the patch into two patches.
---
 xen/arch/x86/cpu/mcheck/vmce.c |  16 ++-
 xen/arch/x86/hvm/hvm.c         | 279 ++++++++++++++++++++++-------------------
 xen/arch/x86/hvm/mtrr.c        |  51 ++++----
 xen/arch/x86/hvm/viridian.c    |  13 +-
 4 files changed, 200 insertions(+), 159 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index e07cd2f..15b0f2a 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -349,6 +349,14 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
     return ret;
 }
 
+void vmce_save_vcpu_ctxt_one(struct vcpu *v, struct hvm_vmce_vcpu *ctxt)
+{
+    ctxt->caps = v->arch.vmce.mcg_cap;
+    ctxt->mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
+    ctxt->mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
+    ctxt->mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl;
+}
+
 static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
@@ -356,13 +364,9 @@ static int vmce_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 
     for_each_vcpu ( d, v )
     {
-        struct hvm_vmce_vcpu ctxt = {
-            .caps = v->arch.vmce.mcg_cap,
-            .mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
-            .mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2,
-            .mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl,
-        };
+        struct hvm_vmce_vcpu ctxt;
 
+        vmce_save_vcpu_ctxt_one(v, &ctxt);
         err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);
         if ( err )
             break;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c23983c..6733f26 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -740,6 +740,11 @@ void hvm_domain_destroy(struct domain *d)
     destroy_vpci_mmcfg(d);
 }
 
+void hvm_save_tsc_adjust_one(struct vcpu *v, struct hvm_tsc_adjust *ctxt)
+{
+    ctxt->tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
+}
+
 static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
@@ -748,7 +753,7 @@ static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 
     for_each_vcpu ( d, v )
     {
-        ctxt.tsc_adjust = v->arch.hvm_vcpu.msr_tsc_adjust;
+        hvm_save_tsc_adjust_one(v, &ctxt);
         err = hvm_save_entry(TSC_ADJUST, v->vcpu_id, h, &ctxt);
         if ( err )
             break;
@@ -780,11 +785,109 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
                           hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
 
+void hvm_save_cpu_ctxt_one(struct vcpu *v, struct hvm_hw_cpu *ctxt)
+{
+    struct segment_register seg;
+
+    /* Architecture-specific vmcs/vmcb bits */
+    hvm_funcs.save_cpu_ctxt(v, ctxt);
+
+    ctxt->tsc = hvm_get_guest_tsc_fixed(v,
+                                        v->domain->arch.hvm_domain.sync_tsc);
+
+    ctxt->msr_tsc_aux = hvm_msr_tsc_aux(v);
+
+    hvm_get_segment_register(v, x86_seg_idtr, &seg);
+    ctxt->idtr_limit = seg.limit;
+    ctxt->idtr_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_gdtr, &seg);
+    ctxt->gdtr_limit = seg.limit;
+    ctxt->gdtr_base = seg.base;
+
+    hvm_get_segment_register(v, x86_seg_cs, &seg);
+    ctxt->cs_sel = seg.sel;
+    ctxt->cs_limit = seg.limit;
+    ctxt->cs_base = seg.base;
+    ctxt->cs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ds, &seg);
+    ctxt->ds_sel = seg.sel;
+    ctxt->ds_limit = seg.limit;
+    ctxt->ds_base = seg.base;
+    ctxt->ds_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_es, &seg);
+    ctxt->es_sel = seg.sel;
+    ctxt->es_limit = seg.limit;
+    ctxt->es_base = seg.base;
+    ctxt->es_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ss, &seg);
+    ctxt->ss_sel = seg.sel;
+    ctxt->ss_limit = seg.limit;
+    ctxt->ss_base = seg.base;
+    ctxt->ss_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_fs, &seg);
+    ctxt->fs_sel = seg.sel;
+    ctxt->fs_limit = seg.limit;
+    ctxt->fs_base = seg.base;
+    ctxt->fs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_gs, &seg);
+    ctxt->gs_sel = seg.sel;
+    ctxt->gs_limit = seg.limit;
+    ctxt->gs_base = seg.base;
+    ctxt->gs_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_tr, &seg);
+    ctxt->tr_sel = seg.sel;
+    ctxt->tr_limit = seg.limit;
+    ctxt->tr_base = seg.base;
+    ctxt->tr_arbytes = seg.attr;
+
+    hvm_get_segment_register(v, x86_seg_ldtr, &seg);
+    ctxt->ldtr_sel = seg.sel;
+    ctxt->ldtr_limit = seg.limit;
+    ctxt->ldtr_base = seg.base;
+    ctxt->ldtr_arbytes = seg.attr;
+
+    if ( v->fpu_initialised )
+    {
+        memcpy(ctxt->fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt->fpu_regs));
+        ctxt->flags = XEN_X86_FPU_INITIALISED;
+    }
+
+    ctxt->rax = v->arch.user_regs.rax;
+    ctxt->rbx = v->arch.user_regs.rbx;
+    ctxt->rcx = v->arch.user_regs.rcx;
+    ctxt->rdx = v->arch.user_regs.rdx;
+    ctxt->rbp = v->arch.user_regs.rbp;
+    ctxt->rsi = v->arch.user_regs.rsi;
+    ctxt->rdi = v->arch.user_regs.rdi;
+    ctxt->rsp = v->arch.user_regs.rsp;
+    ctxt->rip = v->arch.user_regs.rip;
+    ctxt->rflags = v->arch.user_regs.rflags;
+    ctxt->r8  = v->arch.user_regs.r8;
+    ctxt->r9  = v->arch.user_regs.r9;
+    ctxt->r10 = v->arch.user_regs.r10;
+    ctxt->r11 = v->arch.user_regs.r11;
+    ctxt->r12 = v->arch.user_regs.r12;
+    ctxt->r13 = v->arch.user_regs.r13;
+    ctxt->r14 = v->arch.user_regs.r14;
+    ctxt->r15 = v->arch.user_regs.r15;
+    ctxt->dr0 = v->arch.debugreg[0];
+    ctxt->dr1 = v->arch.debugreg[1];
+    ctxt->dr2 = v->arch.debugreg[2];
+    ctxt->dr3 = v->arch.debugreg[3];
+    ctxt->dr6 = v->arch.debugreg[6];
+    ctxt->dr7 = v->arch.debugreg[7];
+}
+
 static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
-    struct segment_register seg;
 
     for_each_vcpu ( d, v )
     {
@@ -795,99 +898,7 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 
         memset(&ctxt, 0, sizeof(ctxt));
 
-        /* Architecture-specific vmcs/vmcb bits */
-        hvm_funcs.save_cpu_ctxt(v, &ctxt);
-
-        ctxt.tsc = hvm_get_guest_tsc_fixed(v, d->arch.hvm_domain.sync_tsc);
-
-        ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
-
-        hvm_get_segment_register(v, x86_seg_idtr, &seg);
-        ctxt.idtr_limit = seg.limit;
-        ctxt.idtr_base = seg.base;
-
-        hvm_get_segment_register(v, x86_seg_gdtr, &seg);
-        ctxt.gdtr_limit = seg.limit;
-        ctxt.gdtr_base = seg.base;
-
-        hvm_get_segment_register(v, x86_seg_cs, &seg);
-        ctxt.cs_sel = seg.sel;
-        ctxt.cs_limit = seg.limit;
-        ctxt.cs_base = seg.base;
-        ctxt.cs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ds, &seg);
-        ctxt.ds_sel = seg.sel;
-        ctxt.ds_limit = seg.limit;
-        ctxt.ds_base = seg.base;
-        ctxt.ds_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_es, &seg);
-        ctxt.es_sel = seg.sel;
-        ctxt.es_limit = seg.limit;
-        ctxt.es_base = seg.base;
-        ctxt.es_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ss, &seg);
-        ctxt.ss_sel = seg.sel;
-        ctxt.ss_limit = seg.limit;
-        ctxt.ss_base = seg.base;
-        ctxt.ss_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_fs, &seg);
-        ctxt.fs_sel = seg.sel;
-        ctxt.fs_limit = seg.limit;
-        ctxt.fs_base = seg.base;
-        ctxt.fs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_gs, &seg);
-        ctxt.gs_sel = seg.sel;
-        ctxt.gs_limit = seg.limit;
-        ctxt.gs_base = seg.base;
-        ctxt.gs_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_tr, &seg);
-        ctxt.tr_sel = seg.sel;
-        ctxt.tr_limit = seg.limit;
-        ctxt.tr_base = seg.base;
-        ctxt.tr_arbytes = seg.attr;
-
-        hvm_get_segment_register(v, x86_seg_ldtr, &seg);
-        ctxt.ldtr_sel = seg.sel;
-        ctxt.ldtr_limit = seg.limit;
-        ctxt.ldtr_base = seg.base;
-        ctxt.ldtr_arbytes = seg.attr;
-
-        if ( v->fpu_initialised )
-        {
-            memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
-            ctxt.flags = XEN_X86_FPU_INITIALISED;
-        }
-
-        ctxt.rax = v->arch.user_regs.rax;
-        ctxt.rbx = v->arch.user_regs.rbx;
-        ctxt.rcx = v->arch.user_regs.rcx;
-        ctxt.rdx = v->arch.user_regs.rdx;
-        ctxt.rbp = v->arch.user_regs.rbp;
-        ctxt.rsi = v->arch.user_regs.rsi;
-        ctxt.rdi = v->arch.user_regs.rdi;
-        ctxt.rsp = v->arch.user_regs.rsp;
-        ctxt.rip = v->arch.user_regs.rip;
-        ctxt.rflags = v->arch.user_regs.rflags;
-        ctxt.r8  = v->arch.user_regs.r8;
-        ctxt.r9  = v->arch.user_regs.r9;
-        ctxt.r10 = v->arch.user_regs.r10;
-        ctxt.r11 = v->arch.user_regs.r11;
-        ctxt.r12 = v->arch.user_regs.r12;
-        ctxt.r13 = v->arch.user_regs.r13;
-        ctxt.r14 = v->arch.user_regs.r14;
-        ctxt.r15 = v->arch.user_regs.r15;
-        ctxt.dr0 = v->arch.debugreg[0];
-        ctxt.dr1 = v->arch.debugreg[1];
-        ctxt.dr2 = v->arch.debugreg[2];
-        ctxt.dr3 = v->arch.debugreg[3];
-        ctxt.dr6 = v->arch.debugreg[6];
-        ctxt.dr7 = v->arch.debugreg[7];
+        hvm_save_cpu_ctxt_one(v, &ctxt);
 
         if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
             return 1; 
@@ -1173,6 +1184,18 @@ HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                                            save_area) + \
                                   xstate_ctxt_size(xcr0))
 
+void hvm_save_cpu_xsave_states_one(struct vcpu *v, struct hvm_hw_cpu_xsave **ctx,
+                                   hvm_domain_context_t *h)
+{
+    unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
+    struct hvm_hw_cpu_xsave *ctxt = *ctx;
+
+    h->cur += size;
+
+    ctxt->xfeature_mask = xfeature_mask;
+    ctxt->xcr0 = v->arch.xcr0;
+    ctxt->xcr0_accum = v->arch.xcr0_accum;
+}
+
 static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
@@ -1190,11 +1213,7 @@ static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
         if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, size) )
             return 1;
         ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
-        h->cur += size;
-
-        ctxt->xfeature_mask = xfeature_mask;
-        ctxt->xcr0 = v->arch.xcr0;
-        ctxt->xcr0_accum = v->arch.xcr0_accum;
+        hvm_save_cpu_xsave_states_one(v, &ctxt, h);
         expand_xsave_states(v, &ctxt->save_area,
                             size - offsetof(typeof(*ctxt), save_area));
     }
@@ -1339,6 +1358,39 @@ static const uint32_t msrs_to_send[] = {
 };
 static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
 
+int hvm_save_cpu_msrs_one(struct vcpu *v, struct hvm_msr **ctx,
+                          hvm_domain_context_t *h)
+{
+    unsigned int i;
+    struct hvm_msr *ctxt = *ctx;
+
+    for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
+    {
+        uint64_t val;
+        int rc = guest_rdmsr(v, msrs_to_send[i], &val);
+
+        /*
+         * It is the programmer's responsibility to ensure that
+         * msrs_to_send[] contains generally-read/write MSRs.
+         * X86EMUL_EXCEPTION here implies a missing feature, and that the
+         * guest doesn't have access to the MSR.
+         */
+        if ( rc == X86EMUL_EXCEPTION )
+            continue;
+
+        if ( rc != X86EMUL_OKAY )
+        {
+            ASSERT_UNREACHABLE();
+            return -ENXIO;
+        }
+
+        if ( !val )
+            continue; /* Skip empty MSRs. */
+
+        ctxt->msr[ctxt->count].index = msrs_to_send[i];
+        ctxt->msr[ctxt->count++].val = val;
+    }
+
+    return 0;
+}
+
 static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
@@ -1355,32 +1407,7 @@ static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
         ctxt = (struct hvm_msr *)&h->data[h->cur];
         ctxt->count = 0;
 
-        for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
-        {
-            uint64_t val;
-            int rc = guest_rdmsr(v, msrs_to_send[i], &val);
-
-            /*
-             * It is the programmers responsibility to ensure that
-             * msrs_to_send[] contain generally-read/write MSRs.
-             * X86EMUL_EXCEPTION here implies a missing feature, and that the
-             * guest doesn't have access to the MSR.
-             */
-            if ( rc == X86EMUL_EXCEPTION )
-                continue;
-
-            if ( rc != X86EMUL_OKAY )
-            {
-                ASSERT_UNREACHABLE();
-                return -ENXIO;
-            }
-
-            if ( !val )
-                continue; /* Skip empty MSRs. */
-
-            ctxt->msr[ctxt->count].index = msrs_to_send[i];
-            ctxt->msr[ctxt->count++].val = val;
-        }
+        if ( hvm_save_cpu_msrs_one(v, &ctxt, h) )
+            return -ENXIO;
 
         if ( hvm_funcs.save_msr )
             hvm_funcs.save_msr(v, ctxt);
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index b721c63..2678b25 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -666,36 +666,41 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
     return 0;
 }
 
+void hvm_save_mtrr_msr_one(struct vcpu *v, struct hvm_hw_mtrr *hw_mtrr)
+{
+    struct mtrr_state *mtrr_state = &v->arch.hvm_vcpu.mtrr;
+    int i;
+
+    hvm_get_guest_pat(v, &hw_mtrr->msr_pat_cr);
+
+    hw_mtrr->msr_mtrr_def_type = mtrr_state->def_type
+                                 | (mtrr_state->enabled << 10);
+    hw_mtrr->msr_mtrr_cap = mtrr_state->mtrr_cap;
+
+    for ( i = 0; i < MTRR_VCNT; i++ )
+    {
+        /* save physbase */
+        hw_mtrr->msr_mtrr_var[i*2] =
+            ((uint64_t*)mtrr_state->var_ranges)[i*2];
+        /* save physmask */
+        hw_mtrr->msr_mtrr_var[i*2+1] =
+            ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
+    }
+
+    for ( i = 0; i < NUM_FIXED_MSR; i++ )
+        hw_mtrr->msr_mtrr_fixed[i] =
+            ((uint64_t*)mtrr_state->fixed_ranges)[i];
+}
+
 static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
 {
-    int i;
     struct vcpu *v;
     struct hvm_hw_mtrr hw_mtrr;
-    struct mtrr_state *mtrr_state;
     /* save mtrr&pat */
     for_each_vcpu(d, v)
     {
-        mtrr_state = &v->arch.hvm_vcpu.mtrr;
-
-        hvm_get_guest_pat(v, &hw_mtrr.msr_pat_cr);
-
-        hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
-                                | (mtrr_state->enabled << 10);
-        hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;
-
-        for ( i = 0; i < MTRR_VCNT; i++ )
-        {
-            /* save physbase */
-            hw_mtrr.msr_mtrr_var[i*2] =
-                ((uint64_t*)mtrr_state->var_ranges)[i*2];
-            /* save physmask */
-            hw_mtrr.msr_mtrr_var[i*2+1] =
-                ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
-        }
-
-        for ( i = 0; i < NUM_FIXED_MSR; i++ )
-            hw_mtrr.msr_mtrr_fixed[i] =
-                ((uint64_t*)mtrr_state->fixed_ranges)[i];
+        hvm_save_mtrr_msr_one(v, &hw_mtrr);
 
         if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
             return 1;
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index d6aa89d..9a49e76 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -1028,6 +1028,12 @@ static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
 HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
                           viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);
 
+void viridian_save_vcpu_ctxt_one(struct vcpu *v,
+                                 struct hvm_viridian_vcpu_context *ctxt)
+{
+    ctxt->vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw;
+    ctxt->vp_assist_pending = v->arch.hvm_vcpu.viridian.vp_assist.pending;
+}
+
 static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct vcpu *v;
@@ -1036,10 +1042,9 @@ static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         return 0;
 
     for_each_vcpu( d, v ) {
-        struct hvm_viridian_vcpu_context ctxt = {
-            .vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw,
-            .vp_assist_pending = v->arch.hvm_vcpu.viridian.vp_assist.pending,
-        };
+        struct hvm_viridian_vcpu_context ctxt;
+
+        viridian_save_vcpu_ctxt_one(v, &ctxt);
 
         if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
             return 1;
-- 
2.7.4

