
[Xen-changelog] [xen staging] x86/hvm: Drop hvm_{vmx,svm} shorthands



commit 342dcb6430d76ebd1ce229a02bad83f8881c9ac9
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Aug 28 16:00:36 2018 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Fri Aug 31 15:40:39 2018 +0100

    x86/hvm: Drop hvm_{vmx,svm} shorthands
    
    By making {vmx,svm} in hvm_vcpu into an anonymous union (consistent with the
    domain side of things), the hvm_{vmx,svm} defines can be dropped, and all
    code refers to the correctly-named fields.  This means that the data
    hierarchy is no longer obscured from grep/cscope/tags/etc.
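
    A minimal sketch of the resulting layout (type and field names as they
    appear in the diff below; all other members elided):

        struct hvm_vcpu {
            /* ... common per-vCPU HVM state ... */
            union {
                struct vmx_vcpu vmx;   /* VT-x specific state */
                struct svm_vcpu svm;   /* SVM specific state  */
            };                         /* anonymous: members accessed directly */
        };

        /* Before: v->arch.hvm_vmx.exec_control  (via the dropped define) */
        /* After:  v->arch.hvm.vmx.exec_control  (the actual field path)  */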
    
    Reformat one comment and switch one bool_t to bool while making changes.
    
    No functional change.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 xen/arch/x86/cpuid.c               |   2 +-
 xen/arch/x86/hvm/svm/asid.c        |   2 +-
 xen/arch/x86/hvm/svm/emulate.c     |   4 +-
 xen/arch/x86/hvm/svm/intr.c        |   8 +-
 xen/arch/x86/hvm/svm/nestedsvm.c   |  28 ++--
 xen/arch/x86/hvm/svm/svm.c         | 178 ++++++++++++------------
 xen/arch/x86/hvm/svm/vmcb.c        |   8 +-
 xen/arch/x86/hvm/vmx/intr.c        |  18 +--
 xen/arch/x86/hvm/vmx/realmode.c    |  18 +--
 xen/arch/x86/hvm/vmx/vmcs.c        | 154 ++++++++++-----------
 xen/arch/x86/hvm/vmx/vmx.c         | 272 ++++++++++++++++++-------------------
 xen/arch/x86/hvm/vmx/vvmx.c        |  64 ++++-----
 xen/arch/x86/mm/p2m-ept.c          |   6 +-
 xen/arch/x86/x86_64/asm-offsets.c  |  12 +-
 xen/drivers/passthrough/io.c       |   2 +-
 xen/include/asm-x86/domain.h       |   4 -
 xen/include/asm-x86/hvm/svm/asid.h |   2 +-
 xen/include/asm-x86/hvm/vcpu.h     |   2 +-
 xen/include/asm-x86/hvm/vmx/vmcs.h |   2 +-
 19 files changed, 390 insertions(+), 396 deletions(-)

diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 59d3298072..d21e7459ac 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -1072,7 +1072,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
     case 0x8000001c:
         if ( (v->arch.xcr0 & X86_XCR0_LWP) && cpu_has_svm )
             /* Turn on available bit and other features specified in lwp_cfg. */
-            res->a = (res->d & v->arch.hvm_svm.guest_lwp_cfg) | 1;
+            res->a = (res->d & v->arch.hvm.svm.guest_lwp_cfg) | 1;
         break;
     }
 }
diff --git a/xen/arch/x86/hvm/svm/asid.c b/xen/arch/x86/hvm/svm/asid.c
index 7cc54dac77..e554e25213 100644
--- a/xen/arch/x86/hvm/svm/asid.c
+++ b/xen/arch/x86/hvm/svm/asid.c
@@ -40,7 +40,7 @@ void svm_asid_init(const struct cpuinfo_x86 *c)
 void svm_asid_handle_vmrun(void)
 {
     struct vcpu *curr = current;
-    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
     struct hvm_vcpu_asid *p_asid =
         nestedhvm_vcpu_in_guestmode(curr)
         ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm.n1asid;
diff --git a/xen/arch/x86/hvm/svm/emulate.c b/xen/arch/x86/hvm/svm/emulate.c
index 535674e9dc..3d04af0ea9 100644
--- a/xen/arch/x86/hvm/svm/emulate.c
+++ b/xen/arch/x86/hvm/svm/emulate.c
@@ -28,7 +28,7 @@
 
 static unsigned long svm_nextrip_insn_length(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     if ( !cpu_has_svm_nrips )
         return 0;
@@ -86,7 +86,7 @@ static const struct {
 int __get_instruction_length_from_list(struct vcpu *v,
         const enum instruction_index *list, unsigned int list_count)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     struct hvm_emulate_ctxt ctxt;
     struct x86_emulate_state *state;
     unsigned long inst_len, j;
diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 8511ff0b70..a17ec8cdd7 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -40,7 +40,7 @@
 
 static void svm_inject_nmi(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
     eventinj_t event;
 
@@ -62,7 +62,7 @@ static void svm_inject_nmi(struct vcpu *v)
 
 static void svm_inject_extint(struct vcpu *v, int vector)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     eventinj_t event;
 
     event.bytes = 0;
@@ -76,7 +76,7 @@ static void svm_inject_extint(struct vcpu *v, int vector)
 
 static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
     vintr_t intr;
 
@@ -133,7 +133,7 @@ static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
 void svm_intr_assist(void) 
 {
     struct vcpu *v = current;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     struct hvm_intack intack;
     enum hvm_intblk intblk;
 
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 9d0fef13fc..3f4f403901 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -137,7 +137,7 @@ void nsvm_vcpu_destroy(struct vcpu *v)
      * of l1 vmcb page.
      */
     if (nv->nv_n1vmcx)
-        v->arch.hvm_svm.vmcb = nv->nv_n1vmcx;
+        v->arch.hvm.svm.vmcb = nv->nv_n1vmcx;
 
     if (svm->ns_cached_msrpm) {
         free_xenheap_pages(svm->ns_cached_msrpm,
@@ -272,8 +272,8 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
      */
 
     /* switch vmcb to l1 guest's vmcb */
-    v->arch.hvm_svm.vmcb = n1vmcb;
-    v->arch.hvm_svm.vmcb_pa = nv->nv_n1vmcx_pa;
+    v->arch.hvm.svm.vmcb = n1vmcb;
+    v->arch.hvm.svm.vmcb_pa = nv->nv_n1vmcx_pa;
 
     /* EFER */
     v->arch.hvm.guest_efer = n1vmcb->_efer;
@@ -350,7 +350,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
 
 static int nsvm_vmrun_permissionmap(struct vcpu *v, bool_t viopm)
 {
-    struct svm_vcpu *arch_svm = &v->arch.hvm_svm;
+    struct svm_vcpu *arch_svm = &v->arch.hvm.svm;
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
     struct vmcb_struct *ns_vmcb = nv->nv_vvmcx;
@@ -390,9 +390,7 @@ static int nsvm_vmrun_permissionmap(struct vcpu *v, bool_t viopm)
     nv->nv_ioport80 = ioport_80;
     nv->nv_ioportED = ioport_ed;
 
-    /* v->arch.hvm_svm.msrpm has type unsigned long, thus
-     * BYTES_PER_LONG.
-     */
+    /* v->arch.hvm.svm.msrpm has type unsigned long, thus BYTES_PER_LONG. */
     for (i = 0; i < MSRPM_SIZE / BYTES_PER_LONG; i++)
         svm->ns_merged_msrpm[i] = arch_svm->msrpm[i] | ns_msrpm_ptr[i];
 
@@ -730,8 +728,8 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
     }
 
     /* switch vmcb to shadow vmcb */
-    v->arch.hvm_svm.vmcb = nv->nv_n2vmcx;
-    v->arch.hvm_svm.vmcb_pa = nv->nv_n2vmcx_pa;
+    v->arch.hvm.svm.vmcb = nv->nv_n2vmcx;
+    v->arch.hvm.svm.vmcb_pa = nv->nv_n2vmcx_pa;
 
     ret = nsvm_vmcb_prepare4vmrun(v, regs);
     if (ret) {
@@ -800,7 +798,7 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
     struct vmcb_struct *ns_vmcb;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     if ( vmcb->_vintr.fields.vgif_enable )
         ASSERT(vmcb->_vintr.fields.vgif == 0);
@@ -1348,7 +1346,7 @@ nestedsvm_vmexit_defer(struct vcpu *v,
     uint64_t exitcode, uint64_t exitinfo1, uint64_t exitinfo2)
 {
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     if ( vmcb->_vintr.fields.vgif_enable )
         vmcb->_vintr.fields.vgif = 0;
@@ -1522,7 +1520,7 @@ void nsvm_vcpu_switch(struct cpu_user_regs *regs)
 
     nv = &vcpu_nestedhvm(v);
     svm = &vcpu_nestedsvm(v);
-    ASSERT(v->arch.hvm_svm.vmcb != NULL);
+    ASSERT(v->arch.hvm.svm.vmcb != NULL);
     ASSERT(nv->nv_n1vmcx != NULL);
     ASSERT(nv->nv_n2vmcx != NULL);
     ASSERT(nv->nv_n1vmcx_pa != INVALID_PADDR);
@@ -1607,7 +1605,7 @@ bool_t
 nestedsvm_gif_isset(struct vcpu *v)
 {
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     /* get the vmcb gif value if using vgif */
     if ( vmcb->_vintr.fields.vgif_enable )
@@ -1640,7 +1638,7 @@ void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v)
 
 void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     unsigned int inst_len;
     uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
     vintr_t intr;
@@ -1672,7 +1670,7 @@ void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v)
  */
 void svm_nested_features_on_efer_update(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
     u32 general2_intercepts;
     vintr_t vintr;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index fc3daa6304..0b06e2ff11 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -88,7 +88,7 @@ static DEFINE_SPINLOCK(osvw_lock);
 /* Only crash the guest if the problem originates in kernel mode. */
 static void svm_crash_or_fault(struct vcpu *v)
 {
-    if ( vmcb_get_cpl(v->arch.hvm_svm.vmcb) )
+    if ( vmcb_get_cpl(v->arch.hvm.svm.vmcb) )
         hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
     else
         domain_crash(v->domain);
@@ -113,7 +113,7 @@ void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
     regs->rip += inst_len;
     regs->eflags &= ~X86_EFLAGS_RF;
 
-    curr->arch.hvm_svm.vmcb->interrupt_shadow = 0;
+    curr->arch.hvm.svm.vmcb->interrupt_shadow = 0;
 
     if ( regs->eflags & X86_EFLAGS_TF )
         hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
@@ -147,7 +147,7 @@ void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags)
     unsigned long *msr_bit;
     const struct domain *d = v->domain;
 
-    msr_bit = svm_msrbit(v->arch.hvm_svm.msrpm, msr);
+    msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
     BUG_ON(msr_bit == NULL);
     msr &= 0x1fff;
 
@@ -176,7 +176,7 @@ static void svm_set_icebp_interception(struct domain *d, bool enable)
 
     for_each_vcpu ( d, v )
     {
-        struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+        struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
         uint32_t intercepts = vmcb_get_general2_intercepts(vmcb);
 
         if ( enable )
@@ -190,7 +190,7 @@ static void svm_set_icebp_interception(struct domain *d, bool enable)
 
 static void svm_save_dr(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     unsigned int flag_dr_dirty = v->arch.hvm.flag_dr_dirty;
 
     if ( !flag_dr_dirty )
@@ -207,10 +207,10 @@ static void svm_save_dr(struct vcpu *v)
         svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_RW);
         svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_RW);
 
-        rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[0]);
-        rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[1]);
-        rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[2]);
-        rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[3]);
+        rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[0]);
+        rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[1]);
+        rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[2]);
+        rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[3]);
     }
 
     v->arch.debugreg[0] = read_debugreg(0);
@@ -238,10 +238,10 @@ static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
         svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_NONE);
         svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_NONE);
 
-        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[0]);
-        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[1]);
-        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[2]);
-        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[3]);
+        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[0]);
+        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[1]);
+        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[2]);
+        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[3]);
     }
 
     write_debugreg(0, v->arch.debugreg[0]);
@@ -260,23 +260,23 @@ static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
  */
 static void svm_restore_dr(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     if ( unlikely(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
         __restore_debug_registers(vmcb, v);
 }
 
 static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     c->cr0 = v->arch.hvm.guest_cr[0];
     c->cr2 = v->arch.hvm.guest_cr[2];
     c->cr3 = v->arch.hvm.guest_cr[3];
     c->cr4 = v->arch.hvm.guest_cr[4];
 
-    c->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs;
-    c->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp;
-    c->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip;
+    c->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs;
+    c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
+    c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
 
     c->pending_event = 0;
     c->error_code = 0;
@@ -294,7 +294,7 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
 static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     struct page_info *page = NULL;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
 
     if ( c->pending_valid )
@@ -346,9 +346,9 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
     svm_update_guest_cr(v, 4, 0);
 
     /* Load sysenter MSRs into both VMCB save area and VCPU fields. */
-    vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
-    vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = c->sysenter_esp;
-    vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = c->sysenter_eip;
+    vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = c->sysenter_cs;
+    vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = c->sysenter_esp;
+    vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = c->sysenter_eip;
     
     if ( paging_mode_hap(v->domain) )
     {
@@ -377,7 +377,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 
 static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     data->shadow_gs        = vmcb->kerngsbase;
     data->msr_lstar        = vmcb->lstar;
@@ -391,7 +391,7 @@ static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 
 static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     vmcb->kerngsbase = data->shadow_gs;
     vmcb->lstar      = data->msr_lstar;
@@ -429,19 +429,19 @@ static void svm_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
 {
     if ( boot_cpu_has(X86_FEATURE_DBEXT) )
     {
-        ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[0];
+        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[0];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR0_ADDRESS_MASK;
 
-        ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[1];
+        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[1];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR1_ADDRESS_MASK;
 
-        ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[2];
+        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[2];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR2_ADDRESS_MASK;
 
-        ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[3];
+        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[3];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR3_ADDRESS_MASK;
     }
@@ -462,7 +462,7 @@ static int svm_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
             else if ( ctxt->msr[i].val >> 32 )
                 err = -EDOM;
             else
-                v->arch.hvm_svm.dr_mask[0] = ctxt->msr[i].val;
+                v->arch.hvm.svm.dr_mask[0] = ctxt->msr[i].val;
             break;
 
         case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
@@ -471,7 +471,7 @@ static int svm_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
             else if ( ctxt->msr[i].val >> 32 )
                 err = -EDOM;
             else
-                v->arch.hvm_svm.dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+                v->arch.hvm.svm.dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
                     ctxt->msr[i].val;
             break;
 
@@ -520,7 +520,7 @@ static void svm_fpu_leave(struct vcpu *v)
 
 static unsigned int svm_get_interrupt_shadow(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     unsigned int intr_shadow = 0;
 
     if ( vmcb->interrupt_shadow )
@@ -534,7 +534,7 @@ static unsigned int svm_get_interrupt_shadow(struct vcpu *v)
 
 static void svm_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
 
     vmcb->interrupt_shadow =
@@ -548,7 +548,7 @@ static void svm_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
 
 static int svm_guest_x86_mode(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     if ( unlikely(!(v->arch.hvm.guest_cr[0] & X86_CR0_PE)) )
         return 0;
@@ -561,7 +561,7 @@ static int svm_guest_x86_mode(struct vcpu *v)
 
 void svm_update_guest_cr(struct vcpu *v, unsigned int cr, unsigned int flags)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     uint64_t value;
 
     switch ( cr )
@@ -645,8 +645,8 @@ void svm_update_guest_cr(struct vcpu *v, unsigned int cr, unsigned int flags)
 
 static void svm_update_guest_efer(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    bool_t lma = !!(v->arch.hvm.guest_efer & EFER_LMA);
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
+    bool lma = v->arch.hvm.guest_efer & EFER_LMA;
     uint64_t new_efer;
 
     new_efer = (v->arch.hvm.guest_efer | EFER_SVME) & ~EFER_LME;
@@ -663,7 +663,7 @@ static void svm_update_guest_efer(struct vcpu *v)
 
 static void svm_cpuid_policy_changed(struct vcpu *v)
 {
-    struct svm_vcpu *svm = &v->arch.hvm_svm;
+    struct svm_vcpu *svm = &v->arch.hvm.svm;
     struct vmcb_struct *vmcb = svm->vmcb;
     const struct cpuid_policy *cp = v->domain->arch.cpuid;
     u32 bitmap = vmcb_get_exception_intercepts(vmcb);
@@ -683,7 +683,7 @@ static void svm_cpuid_policy_changed(struct vcpu *v)
 
 static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
 {
-    struct svm_vcpu *svm = &v->arch.hvm_svm;
+    struct svm_vcpu *svm = &v->arch.hvm.svm;
 
     if ( new_state == vmcb_needs_vmsave )
     {
@@ -704,13 +704,13 @@ static void svm_sync_vmcb(struct vcpu *v, enum vmcb_sync_state new_state)
 
 static unsigned int svm_get_cpl(struct vcpu *v)
 {
-    return vmcb_get_cpl(v->arch.hvm_svm.vmcb);
+    return vmcb_get_cpl(v->arch.hvm.svm.vmcb);
 }
 
 static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                                      struct segment_register *reg)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     ASSERT((v == current) || !vcpu_runnable(v));
 
@@ -755,7 +755,7 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
 static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                                      struct segment_register *reg)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     ASSERT((v == current) || !vcpu_runnable(v));
 
@@ -824,12 +824,12 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
 
 static unsigned long svm_get_shadow_gs_base(struct vcpu *v)
 {
-    return v->arch.hvm_svm.vmcb->kerngsbase;
+    return v->arch.hvm.svm.vmcb->kerngsbase;
 }
 
 static int svm_set_guest_pat(struct vcpu *v, u64 gpat)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     if ( !paging_mode_hap(v->domain) )
         return 0;
@@ -840,7 +840,7 @@ static int svm_set_guest_pat(struct vcpu *v, u64 gpat)
 
 static int svm_get_guest_pat(struct vcpu *v, u64 *gpat)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     if ( !paging_mode_hap(v->domain) )
         return 0;
@@ -888,7 +888,7 @@ static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc,
 
 static void svm_set_tsc_offset(struct vcpu *v, u64 offset, u64 at_tsc)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     struct vmcb_struct *n1vmcb, *n2vmcb;
     uint64_t n2_tsc_offset = 0;
     struct domain *d = v->domain;
@@ -921,7 +921,7 @@ static void svm_set_tsc_offset(struct vcpu *v, u64 offset, u64 at_tsc)
 
 static void svm_set_rdtsc_exiting(struct vcpu *v, bool_t enable)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
     u32 general2_intercepts = vmcb_get_general2_intercepts(vmcb);
 
@@ -940,7 +940,7 @@ static void svm_set_rdtsc_exiting(struct vcpu *v, bool_t enable)
 
 static void svm_set_descriptor_access_exiting(struct vcpu *v, bool enable)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
     u32 mask = GENERAL1_INTERCEPT_IDTR_READ | GENERAL1_INTERCEPT_GDTR_READ
             | GENERAL1_INTERCEPT_LDTR_READ | GENERAL1_INTERCEPT_TR_READ
@@ -957,14 +957,14 @@ static void svm_set_descriptor_access_exiting(struct vcpu *v, bool enable)
 
 static unsigned int svm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    unsigned int len = v->arch.hvm_svm.cached_insn_len;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
+    unsigned int len = v->arch.hvm.svm.cached_insn_len;
 
     if ( len != 0 )
     {
         /* Latch and clear the cached instruction. */
         memcpy(buf, vmcb->guest_ins, MAX_INST_LEN);
-        v->arch.hvm_svm.cached_insn_len = 0;
+        v->arch.hvm.svm.cached_insn_len = 0;
     }
 
     return len;
@@ -1000,14 +1000,14 @@ static void svm_lwp_interrupt(struct cpu_user_regs *regs)
     ack_APIC_irq();
     vlapic_set_irq(
         vcpu_vlapic(curr),
-        (curr->arch.hvm_svm.guest_lwp_cfg >> 40) & 0xff,
+        (curr->arch.hvm.svm.guest_lwp_cfg >> 40) & 0xff,
         0);
 }
 
 static inline void svm_lwp_save(struct vcpu *v)
 {
     /* Don't mess up with other guests. Disable LWP for next VCPU. */
-    if ( v->arch.hvm_svm.guest_lwp_cfg )
+    if ( v->arch.hvm.svm.guest_lwp_cfg )
     {
         wrmsrl(MSR_AMD64_LWP_CFG, 0x0);
         wrmsrl(MSR_AMD64_LWP_CBADDR, 0x0);
@@ -1017,8 +1017,8 @@ static inline void svm_lwp_save(struct vcpu *v)
 static inline void svm_lwp_load(struct vcpu *v)
 {
     /* Only LWP_CFG is reloaded. LWP_CBADDR will be reloaded via xrstor. */
-   if ( v->arch.hvm_svm.guest_lwp_cfg ) 
-       wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.cpu_lwp_cfg);
+   if ( v->arch.hvm.svm.guest_lwp_cfg )
+       wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm.svm.cpu_lwp_cfg);
 }
 
 /* Update LWP_CFG MSR (0xc0000105). Return -1 if error; otherwise returns 0. */
@@ -1035,22 +1035,22 @@ static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content)
         if ( msr_low & ~v->domain->arch.cpuid->extd.raw[0x1c].d )
             return -1;
 
-        v->arch.hvm_svm.guest_lwp_cfg = msr_content;
+        v->arch.hvm.svm.guest_lwp_cfg = msr_content;
 
         /* setup interrupt handler if needed */
         if ( (msr_content & 0x80000000) && ((msr_content >> 40) & 0xff) )
         {
             alloc_direct_apic_vector(&lwp_intr_vector, svm_lwp_interrupt);
-            v->arch.hvm_svm.cpu_lwp_cfg = (msr_content & 0xffff00ffffffffffULL)
+            v->arch.hvm.svm.cpu_lwp_cfg = (msr_content & 0xffff00ffffffffffULL)
                 | ((uint64_t)lwp_intr_vector << 40);
         }
         else
         {
             /* otherwise disable it */
-            v->arch.hvm_svm.cpu_lwp_cfg = msr_content & 0xffff00ff7fffffffULL;
+            v->arch.hvm.svm.cpu_lwp_cfg = msr_content & 0xffff00ff7fffffffULL;
         }
         
-        wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.cpu_lwp_cfg);
+        wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm.svm.cpu_lwp_cfg);
 
         /* track nonalzy state if LWP_CFG is non-zero. */
         v->arch.nonlazy_xstate_used = !!(msr_content);
@@ -1100,7 +1100,7 @@ static void svm_ctxt_switch_from(struct vcpu *v)
 
 static void svm_ctxt_switch_to(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     int cpu = smp_processor_id();
 
     /*
@@ -1129,7 +1129,7 @@ static void svm_ctxt_switch_to(struct vcpu *v)
 
 static void noreturn svm_do_resume(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     bool debug_state = (v->domain->debugger_attached ||
                         v->domain->arch.monitor.software_breakpoint_enabled ||
                         v->domain->arch.monitor.debug_exception_enabled);
@@ -1150,9 +1150,9 @@ static void noreturn svm_do_resume(struct vcpu *v)
                               : (intercepts & ~(1U << TRAP_int3)));
     }
 
-    if ( v->arch.hvm_svm.launch_core != smp_processor_id() )
+    if ( v->arch.hvm.svm.launch_core != smp_processor_id() )
     {
-        v->arch.hvm_svm.launch_core = smp_processor_id();
+        v->arch.hvm.svm.launch_core = smp_processor_id();
         hvm_migrate_timers(v);
         hvm_migrate_pirqs(v);
         /* Migrating to another ASID domain.  Request a new ASID. */
@@ -1178,7 +1178,7 @@ static void noreturn svm_do_resume(struct vcpu *v)
 void svm_vmenter_helper(const struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
-    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
 
     svm_asid_handle_vmrun();
 
@@ -1284,7 +1284,7 @@ static int svm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
 
-    v->arch.hvm_svm.launch_core = -1;
+    v->arch.hvm.svm.launch_core = -1;
 
     if ( (rc = svm_create_vmcb(v)) != 0 )
     {
@@ -1314,7 +1314,7 @@ static void svm_vcpu_destroy(struct vcpu *v)
 static void svm_emul_swint_injection(struct x86_event *event)
 {
     struct vcpu *curr = current;
-    const struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+    const struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
     const struct cpu_user_regs *regs = guest_cpu_user_regs();
     unsigned int trap = event->vector, type = event->type;
     unsigned int fault = TRAP_gp_fault, ec = 0;
@@ -1421,7 +1421,7 @@ static void svm_emul_swint_injection(struct x86_event *event)
 static void svm_inject_event(const struct x86_event *event)
 {
     struct vcpu *curr = current;
-    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
     eventinj_t eventinj = vmcb->eventinj;
     struct x86_event _event = *event;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
@@ -1552,7 +1552,7 @@ static void svm_inject_event(const struct x86_event *event)
 
 static int svm_event_pending(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     return vmcb->eventinj.fields.v;
 }
 
@@ -1792,7 +1792,7 @@ static void svm_do_nested_pgfault(struct vcpu *v,
 static void svm_fpu_dirty_intercept(void)
 {
     struct vcpu *v = current;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     struct vmcb_struct *n1vmcb = vcpu_nestedhvm(v).nv_n1vmcx;
 
     svm_fpu_enter(v);
@@ -1862,7 +1862,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     int ret;
     struct vcpu *v = current;
     const struct domain *d = v->domain;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     switch ( msr )
     {
@@ -1886,13 +1886,13 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     switch ( msr )
     {
     case MSR_IA32_SYSENTER_CS:
-        *msr_content = v->arch.hvm_svm.guest_sysenter_cs;
+        *msr_content = v->arch.hvm.svm.guest_sysenter_cs;
         break;
     case MSR_IA32_SYSENTER_ESP:
-        *msr_content = v->arch.hvm_svm.guest_sysenter_esp;
+        *msr_content = v->arch.hvm.svm.guest_sysenter_esp;
         break;
     case MSR_IA32_SYSENTER_EIP:
-        *msr_content = v->arch.hvm_svm.guest_sysenter_eip;
+        *msr_content = v->arch.hvm.svm.guest_sysenter_eip;
         break;
 
     case MSR_STAR:
@@ -1962,7 +1962,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         break;
 
     case MSR_AMD64_LWP_CFG:
-        *msr_content = v->arch.hvm_svm.guest_lwp_cfg;
+        *msr_content = v->arch.hvm.svm.guest_lwp_cfg;
         break;
 
     case MSR_K7_PERFCTR0:
@@ -1992,14 +1992,14 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext )
             goto gpf;
-        *msr_content = v->arch.hvm_svm.dr_mask[0];
+        *msr_content = v->arch.hvm.svm.dr_mask[0];
         break;
 
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext )
             goto gpf;
         *msr_content =
-            v->arch.hvm_svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+            v->arch.hvm.svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
         break;
 
     case MSR_AMD_OSVW_ID_LENGTH:
@@ -2051,7 +2051,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     int ret, result = X86EMUL_OKAY;
     struct vcpu *v = current;
     struct domain *d = v->domain;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     switch ( msr )
     {
@@ -2084,11 +2084,11 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         switch ( msr )
         {
         case MSR_IA32_SYSENTER_ESP:
-            vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = msr_content;
+            vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = msr_content;
             break;
 
         case MSR_IA32_SYSENTER_EIP:
-            vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = msr_content;
+            vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = msr_content;
             break;
 
         case MSR_LSTAR:
@@ -2114,7 +2114,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         break;
 
     case MSR_IA32_SYSENTER_CS:
-        vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = msr_content;
+        vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = msr_content;
         break;
 
     case MSR_STAR:
@@ -2194,13 +2194,13 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext || (msr_content >> 32) )
             goto gpf;
-        v->arch.hvm_svm.dr_mask[0] = msr_content;
+        v->arch.hvm.svm.dr_mask[0] = msr_content;
         break;
 
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext || (msr_content >> 32) )
             goto gpf;
-        v->arch.hvm_svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+        v->arch.hvm.svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
             msr_content;
         break;
 
@@ -2251,7 +2251,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 static void svm_do_msr_access(struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
-    bool rdmsr = curr->arch.hvm_svm.vmcb->exitinfo1 == 0;
+    bool rdmsr = curr->arch.hvm.svm.vmcb->exitinfo1 == 0;
     int rc, inst_len = __get_instruction_length(
         curr, rdmsr ? INSTR_RDMSR : INSTR_WRMSR);
 
@@ -2391,7 +2391,7 @@ svm_vmexit_do_vmload(struct vmcb_struct *vmcb,
 
     /* State in L1 VMCB is stale now */
-    v->arch.hvm_svm.vmcb_sync_state = vmcb_needs_vmsave;
+    v->arch.hvm.svm.vmcb_sync_state = vmcb_needs_vmsave;
 
     __update_guest_eip(regs, inst_len);
 }
@@ -2519,7 +2519,7 @@ static void svm_invlpg(struct vcpu *v, unsigned long vaddr)
 
 static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
 {
-    const struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    const struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
     if ( vmcb->eventinj.fields.v )
         return false;
@@ -2594,7 +2594,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
 {
     uint64_t exit_reason;
     struct vcpu *v = current;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     eventinj_t eventinj;
     int inst_len, rc;
     vintr_t intr;
@@ -2816,9 +2816,9 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
                     regs->rdx, regs->rsi, regs->rdi);
 
         if ( cpu_has_svm_decode )
-            v->arch.hvm_svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
+            v->arch.hvm.svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
         rc = paging_fault(va, regs);
-        v->arch.hvm_svm.cached_insn_len = 0;
+        v->arch.hvm.svm.cached_insn_len = 0;
 
         if ( rc )
         {
@@ -3020,7 +3020,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     case VMEXIT_NPF:
         perfc_incra(svmexits, VMEXIT_NPF_PERFC);
         if ( cpu_has_svm_decode )
-            v->arch.hvm_svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
+            v->arch.hvm.svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
         rc = vmcb->exitinfo1 & PFEC_page_present
              ? p2m_pt_handle_deferred_changes(vmcb->exitinfo2) : 0;
         if ( rc >= 0 )
@@ -3032,7 +3032,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
                    v, rc, vmcb->exitinfo2, vmcb->exitinfo1);
             domain_crash(v->domain);
         }
-        v->arch.hvm_svm.cached_insn_len = 0;
+        v->arch.hvm.svm.cached_insn_len = 0;
         break;
 
     case VMEXIT_IRET: {
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 2fa1ff532b..9d1c5bf6af 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -53,7 +53,7 @@ void free_vmcb(struct vmcb_struct *vmcb)
 /* This function can directly access fields which are covered by clean bits. */
 static int construct_vmcb(struct vcpu *v)
 {
-    struct svm_vcpu *svm = &v->arch.hvm_svm;
+    struct svm_vcpu *svm = &v->arch.hvm.svm;
     struct vmcb_struct *vmcb = svm->vmcb;
 
     /* Build-time check of the size of VMCB AMD structure. */
@@ -225,7 +225,7 @@ static int construct_vmcb(struct vcpu *v)
 int svm_create_vmcb(struct vcpu *v)
 {
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
-    struct svm_vcpu *svm = &v->arch.hvm_svm;
+    struct svm_vcpu *svm = &v->arch.hvm.svm;
     int rc;
 
     if ( (nv->nv_n1vmcx == NULL) &&
@@ -252,7 +252,7 @@ int svm_create_vmcb(struct vcpu *v)
 void svm_destroy_vmcb(struct vcpu *v)
 {
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
-    struct svm_vcpu *svm = &v->arch.hvm_svm;
+    struct svm_vcpu *svm = &v->arch.hvm.svm;
 
     if ( nv->nv_n1vmcx != NULL )
         free_vmcb(nv->nv_n1vmcx);
@@ -286,7 +286,7 @@ static void vmcb_dump(unsigned char ch)
         for_each_vcpu ( d, v )
         {
             printk("\tVCPU %d\n", v->vcpu_id);
-            svm_vmcb_dump("key_handler", v->arch.hvm_svm.vmcb);
+            svm_vmcb_dump("key_handler", v->arch.hvm.svm.vmcb);
         }
     }
 
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index 889067c5f7..5e8cbd4b4a 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -106,9 +106,9 @@ static void vmx_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
         ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
     }
 
-    if ( !(v->arch.hvm_vmx.exec_control & ctl) )
+    if ( !(v->arch.hvm.vmx.exec_control & ctl) )
     {
-        v->arch.hvm_vmx.exec_control |= ctl;
+        v->arch.hvm.vmx.exec_control |= ctl;
         vmx_update_cpu_exec_control(v);
     }
 }
@@ -137,7 +137,7 @@ static void vmx_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
  *  Unfortunately, interrupt blocking in L2 won't work with simple
  *  intr_window_open (which depends on L2's IF). To solve this,
  *  the following algorithm can be used:
- *   v->arch.hvm_vmx.exec_control.VIRTUAL_INTR_PENDING now denotes
+ *   v->arch.hvm.vmx.exec_control.VIRTUAL_INTR_PENDING now denotes
  *   only L0 control, physical control may be different from it.
  *       - if in L1, it behaves normally, intr window is written
  *         to physical control as it is
@@ -234,7 +234,7 @@ void vmx_intr_assist(void)
     /* Block event injection when single step with MTF. */
     if ( unlikely(v->arch.hvm.single_step) )
     {
-        v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
+        v->arch.hvm.vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
         vmx_update_cpu_exec_control(v);
         return;
     }
@@ -352,7 +352,7 @@ void vmx_intr_assist(void)
                     printk("\n");
                 }
 
-                pi_desc = &v->arch.hvm_vmx.pi_desc;
+                pi_desc = &v->arch.hvm.vmx.pi_desc;
                 if ( pi_desc )
                 {
                     word = (const void *)&pi_desc->pir;
@@ -374,12 +374,12 @@ void vmx_intr_assist(void)
                     intack.vector;
         __vmwrite(GUEST_INTR_STATUS, status);
 
-        n = ARRAY_SIZE(v->arch.hvm_vmx.eoi_exit_bitmap);
-        while ( (i = find_first_bit(&v->arch.hvm_vmx.eoi_exitmap_changed,
+        n = ARRAY_SIZE(v->arch.hvm.vmx.eoi_exit_bitmap);
+        while ( (i = find_first_bit(&v->arch.hvm.vmx.eoi_exitmap_changed,
                                     n)) < n )
         {
-            clear_bit(i, &v->arch.hvm_vmx.eoi_exitmap_changed);
-            __vmwrite(EOI_EXIT_BITMAP(i), v->arch.hvm_vmx.eoi_exit_bitmap[i]);
+            clear_bit(i, &v->arch.hvm.vmx.eoi_exitmap_changed);
+            __vmwrite(EOI_EXIT_BITMAP(i), v->arch.hvm.vmx.eoi_exit_bitmap[i]);
         }
 
         pt_intr_post(v, intack);
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 032a681243..bb0b4439df 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -175,8 +175,8 @@ void vmx_realmode(struct cpu_user_regs *regs)
         intr_info = 0;
     }
 
-    curr->arch.hvm_vmx.vmx_emulate = 1;
-    while ( curr->arch.hvm_vmx.vmx_emulate &&
+    curr->arch.hvm.vmx.vmx_emulate = 1;
+    while ( curr->arch.hvm.vmx.vmx_emulate &&
             !softirq_pending(smp_processor_id()) )
     {
         /*
@@ -185,7 +185,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
          * in real mode, because we don't emulate protected-mode IDT vectoring.
          */
         if ( unlikely(!(++emulations & 15)) &&
-             curr->arch.hvm_vmx.vmx_realmode && 
+             curr->arch.hvm.vmx.vmx_realmode &&
              hvm_local_events_need_delivery(curr) )
             break;
 
@@ -195,20 +195,20 @@ void vmx_realmode(struct cpu_user_regs *regs)
             break;
 
         /* Stop emulating unless our segment state is not safe */
-        if ( curr->arch.hvm_vmx.vmx_realmode )
-            curr->arch.hvm_vmx.vmx_emulate = 
-                (curr->arch.hvm_vmx.vm86_segment_mask != 0);
+        if ( curr->arch.hvm.vmx.vmx_realmode )
+            curr->arch.hvm.vmx.vmx_emulate =
+                (curr->arch.hvm.vmx.vm86_segment_mask != 0);
         else
-            curr->arch.hvm_vmx.vmx_emulate = 
+            curr->arch.hvm.vmx.vmx_emulate =
                  ((hvmemul_ctxt.seg_reg[x86_seg_cs].sel & 3)
                   || (hvmemul_ctxt.seg_reg[x86_seg_ss].sel & 3));
     }
 
     /* Need to emulate next time if we've started an IO operation */
     if ( vio->io_req.state != STATE_IOREQ_NONE )
-        curr->arch.hvm_vmx.vmx_emulate = 1;
+        curr->arch.hvm.vmx.vmx_emulate = 1;
 
-    if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
+    if ( !curr->arch.hvm.vmx.vmx_emulate && !curr->arch.hvm.vmx.vmx_realmode )
     {
         /*
          * Cannot enter protected mode with bogus selector RPLs and DPLs.
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index a847f37186..d9747b4fd3 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -518,7 +518,7 @@ static void vmx_free_vmcs(paddr_t pa)
 static void __vmx_clear_vmcs(void *info)
 {
     struct vcpu *v = info;
-    struct vmx_vcpu *vmx = &v->arch.hvm_vmx;
+    struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
 
     /* Otherwise we can nest (vmx_cpu_down() vs. vmx_clear_vmcs()). */
     ASSERT(!local_irq_is_enabled());
@@ -541,7 +541,7 @@ static void __vmx_clear_vmcs(void *info)
 
 static void vmx_clear_vmcs(struct vcpu *v)
 {
-    int cpu = v->arch.hvm_vmx.active_cpu;
+    int cpu = v->arch.hvm.vmx.active_cpu;
 
     if ( cpu != -1 )
         on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1);
@@ -553,16 +553,16 @@ static void vmx_load_vmcs(struct vcpu *v)
 
     local_irq_save(flags);
 
-    if ( v->arch.hvm_vmx.active_cpu == -1 )
+    if ( v->arch.hvm.vmx.active_cpu == -1 )
     {
-        list_add(&v->arch.hvm_vmx.active_list, &this_cpu(active_vmcs_list));
-        v->arch.hvm_vmx.active_cpu = smp_processor_id();
+        list_add(&v->arch.hvm.vmx.active_list, &this_cpu(active_vmcs_list));
+        v->arch.hvm.vmx.active_cpu = smp_processor_id();
     }
 
-    ASSERT(v->arch.hvm_vmx.active_cpu == smp_processor_id());
+    ASSERT(v->arch.hvm.vmx.active_cpu == smp_processor_id());
 
-    __vmptrld(v->arch.hvm_vmx.vmcs_pa);
-    this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs_pa;
+    __vmptrld(v->arch.hvm.vmx.vmcs_pa);
+    this_cpu(current_vmcs) = v->arch.hvm.vmx.vmcs_pa;
 
     local_irq_restore(flags);
 }
@@ -571,11 +571,11 @@ void vmx_vmcs_reload(struct vcpu *v)
 {
     /*
      * As we may be running with interrupts disabled, we can't acquire
-     * v->arch.hvm_vmx.vmcs_lock here. However, with interrupts disabled
+     * v->arch.hvm.vmx.vmcs_lock here. However, with interrupts disabled
      * the VMCS can't be taken away from us anymore if we still own it.
      */
     ASSERT(v->is_running || !local_irq_is_enabled());
-    if ( v->arch.hvm_vmx.vmcs_pa == this_cpu(current_vmcs) )
+    if ( v->arch.hvm.vmx.vmcs_pa == this_cpu(current_vmcs) )
         return;
 
     vmx_load_vmcs(v);
@@ -717,7 +717,7 @@ void vmx_cpu_down(void)
 
     while ( !list_empty(active_vmcs_list) )
         __vmx_clear_vmcs(list_entry(active_vmcs_list->next,
-                                    struct vcpu, arch.hvm_vmx.active_list));
+                                    struct vcpu, arch.hvm.vmx.active_list));
 
     BUG_ON(!(read_cr4() & X86_CR4_VMXE));
     this_cpu(vmxon) = 0;
@@ -741,7 +741,7 @@ bool_t vmx_vmcs_try_enter(struct vcpu *v)
      * vmx_vmcs_enter/exit and scheduling tail critical regions.
      */
     if ( likely(v == current) )
-        return v->arch.hvm_vmx.vmcs_pa == this_cpu(current_vmcs);
+        return v->arch.hvm.vmx.vmcs_pa == this_cpu(current_vmcs);
 
     fv = &this_cpu(foreign_vmcs);
 
@@ -755,7 +755,7 @@ bool_t vmx_vmcs_try_enter(struct vcpu *v)
         BUG_ON(fv->count != 0);
 
         vcpu_pause(v);
-        spin_lock(&v->arch.hvm_vmx.vmcs_lock);
+        spin_lock(&v->arch.hvm.vmx.vmcs_lock);
 
         vmx_clear_vmcs(v);
         vmx_load_vmcs(v);
@@ -793,7 +793,7 @@ void vmx_vmcs_exit(struct vcpu *v)
         if ( is_hvm_vcpu(current) )
             vmx_load_vmcs(current);
 
-        spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
+        spin_unlock(&v->arch.hvm.vmx.vmcs_lock);
         vcpu_unpause(v);
 
         fv->v = NULL;
@@ -824,7 +824,7 @@ static void vmx_set_host_env(struct vcpu *v)
 void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
                              enum vmx_msr_intercept_type type)
 {
-    struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
+    struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
     struct domain *d = v->domain;
 
     /* VMX MSR bitmap supported? */
@@ -856,7 +856,7 @@ void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
 void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
                            enum vmx_msr_intercept_type type)
 {
-    struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
+    struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
 
     /* VMX MSR bitmap supported? */
     if ( msr_bitmap == NULL )
@@ -901,7 +901,7 @@ bool vmx_msr_is_intercepted(struct vmx_msr_bitmap *msr_bitmap,
  */
 void vmx_vmcs_switch(paddr_t from, paddr_t to)
 {
-    struct vmx_vcpu *vmx = &current->arch.hvm_vmx;
+    struct vmx_vcpu *vmx = &current->arch.hvm.vmx;
     spin_lock(&vmx->vmcs_lock);
 
     __vmpclear(from);
@@ -924,14 +924,14 @@ void vmx_vmcs_switch(paddr_t from, paddr_t to)
 
 void virtual_vmcs_enter(const struct vcpu *v)
 {
-    __vmptrld(v->arch.hvm_vmx.vmcs_shadow_maddr);
+    __vmptrld(v->arch.hvm.vmx.vmcs_shadow_maddr);
 }
 
 void virtual_vmcs_exit(const struct vcpu *v)
 {
     paddr_t cur = this_cpu(current_vmcs);
 
-    __vmpclear(v->arch.hvm_vmx.vmcs_shadow_maddr);
+    __vmpclear(v->arch.hvm.vmx.vmcs_shadow_maddr);
     if ( cur )
         __vmptrld(cur);
 }
@@ -984,13 +984,13 @@ enum vmx_insn_errno virtual_vmcs_vmwrite_safe(const struct vcpu *v,
  */
 static void pi_desc_init(struct vcpu *v)
 {
-    v->arch.hvm_vmx.pi_desc.nv = posted_intr_vector;
+    v->arch.hvm.vmx.pi_desc.nv = posted_intr_vector;
 
     /*
      * Mark NDST as invalid, then we can use this invalid value as a
      * marker to whether update NDST or not in vmx_pi_hooks_assign().
      */
-    v->arch.hvm_vmx.pi_desc.ndst = APIC_INVALID_DEST;
+    v->arch.hvm.vmx.pi_desc.ndst = APIC_INVALID_DEST;
 }
 
 static int construct_vmcs(struct vcpu *v)
@@ -1005,31 +1005,31 @@ static int construct_vmcs(struct vcpu *v)
     /* VMCS controls. */
     __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
 
-    v->arch.hvm_vmx.exec_control = vmx_cpu_based_exec_control;
+    v->arch.hvm.vmx.exec_control = vmx_cpu_based_exec_control;
     if ( d->arch.vtsc && !cpu_has_vmx_tsc_scaling )
-        v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
+        v->arch.hvm.vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
 
-    v->arch.hvm_vmx.secondary_exec_control = vmx_secondary_exec_control;
+    v->arch.hvm.vmx.secondary_exec_control = vmx_secondary_exec_control;
 
     /*
      * Disable descriptor table exiting: It's controlled by the VM event
      * monitor requesting it.
      */
-    v->arch.hvm_vmx.secondary_exec_control &=
+    v->arch.hvm.vmx.secondary_exec_control &=
         ~SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
 
     /* Disable VPID for now: we decide when to enable it on VMENTER. */
-    v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
+    v->arch.hvm.vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
 
     if ( paging_mode_hap(d) )
     {
-        v->arch.hvm_vmx.exec_control &= ~(CPU_BASED_INVLPG_EXITING |
+        v->arch.hvm.vmx.exec_control &= ~(CPU_BASED_INVLPG_EXITING |
                                           CPU_BASED_CR3_LOAD_EXITING |
                                           CPU_BASED_CR3_STORE_EXITING);
     }
     else
     {
-        v->arch.hvm_vmx.secondary_exec_control &= 
+        v->arch.hvm.vmx.secondary_exec_control &=
             ~(SECONDARY_EXEC_ENABLE_EPT | 
               SECONDARY_EXEC_UNRESTRICTED_GUEST |
               SECONDARY_EXEC_ENABLE_INVPCID);
@@ -1039,25 +1039,25 @@ static int construct_vmcs(struct vcpu *v)
     }
 
     /* Disable Virtualize x2APIC mode by default. */
-    v->arch.hvm_vmx.secondary_exec_control &=
+    v->arch.hvm.vmx.secondary_exec_control &=
         ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 
     /* Do not enable Monitor Trap Flag unless start single step debug */
-    v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
+    v->arch.hvm.vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
 
     /* Disable VMFUNC and #VE for now: they may be enabled later by altp2m. */
-    v->arch.hvm_vmx.secondary_exec_control &=
+    v->arch.hvm.vmx.secondary_exec_control &=
         ~(SECONDARY_EXEC_ENABLE_VM_FUNCTIONS |
           SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS);
 
     if ( !has_vlapic(d) )
     {
         /* Disable virtual apics, TPR */
-        v->arch.hvm_vmx.secondary_exec_control &=
+        v->arch.hvm.vmx.secondary_exec_control &=
             ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES
               | SECONDARY_EXEC_APIC_REGISTER_VIRT
               | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
-        v->arch.hvm_vmx.exec_control &= ~CPU_BASED_TPR_SHADOW;
+        v->arch.hvm.vmx.exec_control &= ~CPU_BASED_TPR_SHADOW;
 
         /* In turn, disable posted interrupts. */
         __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
@@ -1077,7 +1077,7 @@ static int construct_vmcs(struct vcpu *v)
 
     if ( cpu_has_vmx_secondary_exec_control )
         __vmwrite(SECONDARY_VM_EXEC_CONTROL,
-                  v->arch.hvm_vmx.secondary_exec_control);
+                  v->arch.hvm.vmx.secondary_exec_control);
 
     /* MSR access bitmap. */
     if ( cpu_has_vmx_msr_bitmap )
@@ -1091,7 +1091,7 @@ static int construct_vmcs(struct vcpu *v)
         }
 
         memset(msr_bitmap, ~0, PAGE_SIZE);
-        v->arch.hvm_vmx.msr_bitmap = msr_bitmap;
+        v->arch.hvm.vmx.msr_bitmap = msr_bitmap;
         __vmwrite(MSR_BITMAP, virt_to_maddr(msr_bitmap));
 
         vmx_clear_msr_intercept(v, MSR_FS_BASE, VMX_MSR_RW);
@@ -1116,8 +1116,8 @@ static int construct_vmcs(struct vcpu *v)
         unsigned int i;
 
         /* EOI-exit bitmap */
-        bitmap_zero(v->arch.hvm_vmx.eoi_exit_bitmap, NR_VECTORS);
-        for ( i = 0; i < ARRAY_SIZE(v->arch.hvm_vmx.eoi_exit_bitmap); ++i )
+        bitmap_zero(v->arch.hvm.vmx.eoi_exit_bitmap, NR_VECTORS);
+        for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.vmx.eoi_exit_bitmap); ++i )
             __vmwrite(EOI_EXIT_BITMAP(i), 0);
 
         /* Initialise Guest Interrupt Status (RVI and SVI) to 0 */
@@ -1129,12 +1129,12 @@ static int construct_vmcs(struct vcpu *v)
         if ( iommu_intpost )
             pi_desc_init(v);
 
-        __vmwrite(PI_DESC_ADDR, virt_to_maddr(&v->arch.hvm_vmx.pi_desc));
+        __vmwrite(PI_DESC_ADDR, virt_to_maddr(&v->arch.hvm.vmx.pi_desc));
         __vmwrite(POSTED_INTR_NOTIFICATION_VECTOR, posted_intr_vector);
     }
 
     /* Disable PML anyway here as it will only be enabled in log dirty mode */
-    v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
+    v->arch.hvm.vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
 
     /* Host data selectors. */
     __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
@@ -1147,10 +1147,10 @@ static int construct_vmcs(struct vcpu *v)
     __vmwrite(HOST_TR_SELECTOR, TSS_ENTRY << 3);
 
     /* Host control registers. */
-    v->arch.hvm_vmx.host_cr0 = read_cr0() & ~X86_CR0_TS;
+    v->arch.hvm.vmx.host_cr0 = read_cr0() & ~X86_CR0_TS;
     if ( !v->arch.fully_eager_fpu )
-        v->arch.hvm_vmx.host_cr0 |= X86_CR0_TS;
-    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+        v->arch.hvm.vmx.host_cr0 |= X86_CR0_TS;
+    __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
     __vmwrite(HOST_CR4, mmu_cr4_features);
     if ( cpu_has_vmx_efer )
         __vmwrite(HOST_EFER, read_efer());
@@ -1172,7 +1172,7 @@ static int construct_vmcs(struct vcpu *v)
 
     __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
     __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);
-    v->arch.hvm_vmx.cr4_host_mask = ~0UL;
+    v->arch.hvm.vmx.cr4_host_mask = ~0UL;
 
     __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
     __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
@@ -1228,7 +1228,7 @@ static int construct_vmcs(struct vcpu *v)
     __vmwrite(GUEST_DR7, 0);
     __vmwrite(VMCS_LINK_POINTER, ~0UL);
 
-    v->arch.hvm_vmx.exception_bitmap = HVM_TRAP_MASK
+    v->arch.hvm.vmx.exception_bitmap = HVM_TRAP_MASK
               | (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault))
               | (v->arch.fully_eager_fpu ? 0 : (1U << TRAP_no_device));
     vmx_update_exception_bitmap(v);
@@ -1308,7 +1308,7 @@ static struct vmx_msr_entry *locate_msr_entry(
 struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr,
                                    enum vmx_msr_list_type type)
 {
-    const struct vmx_vcpu *vmx = &v->arch.hvm_vmx;
+    const struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
     struct vmx_msr_entry *start = NULL, *ent, *end;
     unsigned int substart = 0, subend = vmx->msr_save_count;
     unsigned int total = vmx->msr_load_count;
@@ -1349,7 +1349,7 @@ struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr,
 int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
                 enum vmx_msr_list_type type)
 {
-    struct vmx_vcpu *vmx = &v->arch.hvm_vmx;
+    struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
     struct vmx_msr_entry **ptr, *start = NULL, *ent, *end;
     unsigned int substart, subend, total;
     int rc;
@@ -1460,7 +1460,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
 
 int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type)
 {
-    struct vmx_vcpu *vmx = &v->arch.hvm_vmx;
+    struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
     struct vmx_msr_entry *start = NULL, *ent, *end;
     unsigned int substart = 0, subend = vmx->msr_save_count;
     unsigned int total = vmx->msr_load_count;
@@ -1524,21 +1524,21 @@ int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type)
 
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector)
 {
-    if ( !test_and_set_bit(vector, v->arch.hvm_vmx.eoi_exit_bitmap) )
+    if ( !test_and_set_bit(vector, v->arch.hvm.vmx.eoi_exit_bitmap) )
         set_bit(vector / BITS_PER_LONG,
-                &v->arch.hvm_vmx.eoi_exitmap_changed);
+                &v->arch.hvm.vmx.eoi_exitmap_changed);
 }
 
 void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector)
 {
-    if ( test_and_clear_bit(vector, v->arch.hvm_vmx.eoi_exit_bitmap) )
+    if ( test_and_clear_bit(vector, v->arch.hvm.vmx.eoi_exit_bitmap) )
         set_bit(vector / BITS_PER_LONG,
-                &v->arch.hvm_vmx.eoi_exitmap_changed);
+                &v->arch.hvm.vmx.eoi_exitmap_changed);
 }
 
 bool_t vmx_vcpu_pml_enabled(const struct vcpu *v)
 {
-    return !!(v->arch.hvm_vmx.secondary_exec_control &
+    return !!(v->arch.hvm.vmx.secondary_exec_control &
               SECONDARY_EXEC_ENABLE_PML);
 }
 
@@ -1547,19 +1547,19 @@ int vmx_vcpu_enable_pml(struct vcpu *v)
     if ( vmx_vcpu_pml_enabled(v) )
         return 0;
 
-    v->arch.hvm_vmx.pml_pg = v->domain->arch.paging.alloc_page(v->domain);
-    if ( !v->arch.hvm_vmx.pml_pg )
+    v->arch.hvm.vmx.pml_pg = v->domain->arch.paging.alloc_page(v->domain);
+    if ( !v->arch.hvm.vmx.pml_pg )
         return -ENOMEM;
 
     vmx_vmcs_enter(v);
 
-    __vmwrite(PML_ADDRESS, page_to_maddr(v->arch.hvm_vmx.pml_pg));
+    __vmwrite(PML_ADDRESS, page_to_maddr(v->arch.hvm.vmx.pml_pg));
     __vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1);
 
-    v->arch.hvm_vmx.secondary_exec_control |= SECONDARY_EXEC_ENABLE_PML;
+    v->arch.hvm.vmx.secondary_exec_control |= SECONDARY_EXEC_ENABLE_PML;
 
     __vmwrite(SECONDARY_VM_EXEC_CONTROL,
-              v->arch.hvm_vmx.secondary_exec_control);
+              v->arch.hvm.vmx.secondary_exec_control);
 
     vmx_vmcs_exit(v);
 
@@ -1576,14 +1576,14 @@ void vmx_vcpu_disable_pml(struct vcpu *v)
 
     vmx_vmcs_enter(v);
 
-    v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
+    v->arch.hvm.vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
     __vmwrite(SECONDARY_VM_EXEC_CONTROL,
-              v->arch.hvm_vmx.secondary_exec_control);
+              v->arch.hvm.vmx.secondary_exec_control);
 
     vmx_vmcs_exit(v);
 
-    v->domain->arch.paging.free_page(v->domain, v->arch.hvm_vmx.pml_pg);
-    v->arch.hvm_vmx.pml_pg = NULL;
+    v->domain->arch.paging.free_page(v->domain, v->arch.hvm.vmx.pml_pg);
+    v->arch.hvm.vmx.pml_pg = NULL;
 }
 
 void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
@@ -1602,7 +1602,7 @@ void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
     if ( pml_idx == (NR_PML_ENTRIES - 1) )
         goto out;
 
-    pml_buf = __map_domain_page(v->arch.hvm_vmx.pml_pg);
+    pml_buf = __map_domain_page(v->arch.hvm.vmx.pml_pg);
 
     /*
      * PML index can be either 2^16-1 (buffer is full), or 0 ~ NR_PML_ENTRIES-1
@@ -1743,7 +1743,7 @@ void vmx_domain_update_eptp(struct domain *d)
 
 int vmx_create_vmcs(struct vcpu *v)
 {
-    struct vmx_vcpu *vmx = &v->arch.hvm_vmx;
+    struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
     int rc;
 
     if ( (vmx->vmcs_pa = vmx_alloc_vmcs()) == 0 )
@@ -1765,15 +1765,15 @@ int vmx_create_vmcs(struct vcpu *v)
 
 void vmx_destroy_vmcs(struct vcpu *v)
 {
-    struct vmx_vcpu *vmx = &v->arch.hvm_vmx;
+    struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
 
     vmx_clear_vmcs(v);
 
     vmx_free_vmcs(vmx->vmcs_pa);
 
-    free_xenheap_page(v->arch.hvm_vmx.host_msr_area);
-    free_xenheap_page(v->arch.hvm_vmx.msr_area);
-    free_xenheap_page(v->arch.hvm_vmx.msr_bitmap);
+    free_xenheap_page(v->arch.hvm.vmx.host_msr_area);
+    free_xenheap_page(v->arch.hvm.vmx.msr_area);
+    free_xenheap_page(v->arch.hvm.vmx.msr_bitmap);
 }
 
 void vmx_vmentry_failure(void)
@@ -1783,7 +1783,7 @@ void vmx_vmentry_failure(void)
 
     __vmread(VM_INSTRUCTION_ERROR, &error);
     gprintk(XENLOG_ERR, "VM%s error: %#lx\n",
-            curr->arch.hvm_vmx.launched ? "RESUME" : "LAUNCH", error);
+            curr->arch.hvm.vmx.launched ? "RESUME" : "LAUNCH", error);
 
     if ( error == VMX_INSN_INVALID_CONTROL_STATE ||
          error == VMX_INSN_INVALID_HOST_STATE )
@@ -1797,7 +1797,7 @@ void vmx_do_resume(struct vcpu *v)
     bool_t debug_state;
     unsigned long host_cr4;
 
-    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
+    if ( v->arch.hvm.vmx.active_cpu == smp_processor_id() )
         vmx_vmcs_reload(v);
     else
     {
@@ -1814,7 +1814,7 @@ void vmx_do_resume(struct vcpu *v)
         if ( has_arch_pdevs(v->domain) && !iommu_snoop
                 && !cpu_has_wbinvd_exiting )
         {
-            int cpu = v->arch.hvm_vmx.active_cpu;
+            int cpu = v->arch.hvm.vmx.active_cpu;
             if ( cpu != -1 )
                 flush_mask(cpumask_of(cpu), FLUSH_CACHE);
         }
@@ -1829,7 +1829,7 @@ void vmx_do_resume(struct vcpu *v)
          * VCPU migration. The environment of current VMCS is updated in place,
          * but the action of another VMCS is deferred till it is switched in.
          */
-        v->arch.hvm_vmx.hostenv_migrated = 1;
+        v->arch.hvm.vmx.hostenv_migrated = 1;
 
         hvm_asid_flush_vcpu(v);
     }
@@ -1925,7 +1925,7 @@ void vmcs_dump_vcpu(struct vcpu *v)
     printk("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
            cr4, vmr(CR4_READ_SHADOW), vmr(CR4_GUEST_HOST_MASK));
     printk("CR3 = 0x%016lx\n", vmr(GUEST_CR3));
-    if ( (v->arch.hvm_vmx.secondary_exec_control &
+    if ( (v->arch.hvm.vmx.secondary_exec_control &
           SECONDARY_EXEC_ENABLE_EPT) &&
          (cr4 & X86_CR4_PAE) && !(vmentry_ctl & VM_ENTRY_IA32E_MODE) )
     {
@@ -1965,7 +1965,7 @@ void vmcs_dump_vcpu(struct vcpu *v)
                vmr(GUEST_PERF_GLOBAL_CTRL), vmr(GUEST_BNDCFGS));
     printk("Interruptibility = %08x  ActivityState = %08x\n",
            vmr32(GUEST_INTERRUPTIBILITY_INFO), vmr32(GUEST_ACTIVITY_STATE));
-    if ( v->arch.hvm_vmx.secondary_exec_control &
+    if ( v->arch.hvm.vmx.secondary_exec_control &
          SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY )
         printk("InterruptStatus = %04x\n", vmr16(GUEST_INTR_STATUS));
 
@@ -2016,11 +2016,11 @@ void vmcs_dump_vcpu(struct vcpu *v)
            vmr32(IDT_VECTORING_INFO), vmr32(IDT_VECTORING_ERROR_CODE));
     printk("TSC Offset = 0x%016lx  TSC Multiplier = 0x%016lx\n",
            vmr(TSC_OFFSET), vmr(TSC_MULTIPLIER));
-    if ( (v->arch.hvm_vmx.exec_control & CPU_BASED_TPR_SHADOW) ||
+    if ( (v->arch.hvm.vmx.exec_control & CPU_BASED_TPR_SHADOW) ||
          (vmx_pin_based_exec_control & PIN_BASED_POSTED_INTERRUPT) )
         printk("TPR Threshold = 0x%02x  PostedIntrVec = 0x%02x\n",
                vmr32(TPR_THRESHOLD), vmr16(POSTED_INTR_NOTIFICATION_VECTOR));
-    if ( (v->arch.hvm_vmx.secondary_exec_control &
+    if ( (v->arch.hvm.vmx.secondary_exec_control &
           SECONDARY_EXEC_ENABLE_EPT) )
         printk("EPT pointer = 0x%016lx  EPTP index = 0x%04x\n",
                vmr(EPT_POINTER), vmr16(EPTP_INDEX));
@@ -2031,11 +2031,11 @@ void vmcs_dump_vcpu(struct vcpu *v)
                i + 1, vmr(CR3_TARGET_VALUE(i + 1)));
     if ( i < n )
         printk("CR3 target%u=%016lx\n", i, vmr(CR3_TARGET_VALUE(i)));
-    if ( v->arch.hvm_vmx.secondary_exec_control &
+    if ( v->arch.hvm.vmx.secondary_exec_control &
          SECONDARY_EXEC_PAUSE_LOOP_EXITING )
         printk("PLE Gap=%08x Window=%08x\n",
                vmr32(PLE_GAP), vmr32(PLE_WINDOW));
-    if ( v->arch.hvm_vmx.secondary_exec_control &
+    if ( v->arch.hvm.vmx.secondary_exec_control &
          (SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) )
         printk("Virtual processor ID = 0x%04x VMfunc controls = %016lx\n",
                vmr16(VIRTUAL_PROCESSOR_ID), vmr(VM_FUNCTION_CONTROL));
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 141737c72e..e926b0b28e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -108,20 +108,20 @@ static void vmx_vcpu_block(struct vcpu *v)
     spinlock_t *old_lock;
     spinlock_t *pi_blocking_list_lock =
                &per_cpu(vmx_pi_blocking, v->processor).lock;
-    struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
+    struct pi_desc *pi_desc = &v->arch.hvm.vmx.pi_desc;
 
     spin_lock_irqsave(pi_blocking_list_lock, flags);
-    old_lock = cmpxchg(&v->arch.hvm_vmx.pi_blocking.lock, NULL,
+    old_lock = cmpxchg(&v->arch.hvm.vmx.pi_blocking.lock, NULL,
                        pi_blocking_list_lock);
 
     /*
-     * 'v->arch.hvm_vmx.pi_blocking.lock' should be NULL before
+     * 'v->arch.hvm.vmx.pi_blocking.lock' should be NULL before
      * being assigned to a new value, since the vCPU is currently
      * running and it cannot be on any blocking list.
      */
     ASSERT(old_lock == NULL);
 
-    list_add_tail(&v->arch.hvm_vmx.pi_blocking.list,
+    list_add_tail(&v->arch.hvm.vmx.pi_blocking.list,
                   &per_cpu(vmx_pi_blocking, v->processor).list);
     spin_unlock_irqrestore(pi_blocking_list_lock, flags);
 
@@ -137,7 +137,7 @@ static void vmx_vcpu_block(struct vcpu *v)
 
 static void vmx_pi_switch_from(struct vcpu *v)
 {
-    struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
+    struct pi_desc *pi_desc = &v->arch.hvm.vmx.pi_desc;
 
     if ( test_bit(_VPF_blocked, &v->pause_flags) )
         return;
@@ -147,7 +147,7 @@ static void vmx_pi_switch_from(struct vcpu *v)
 
 static void vmx_pi_switch_to(struct vcpu *v)
 {
-    struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
+    struct pi_desc *pi_desc = &v->arch.hvm.vmx.pi_desc;
     unsigned int dest = cpu_physical_id(v->processor);
 
     write_atomic(&pi_desc->ndst,
@@ -160,7 +160,7 @@ static void vmx_pi_unblock_vcpu(struct vcpu *v)
 {
     unsigned long flags;
     spinlock_t *pi_blocking_list_lock;
-    struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
+    struct pi_desc *pi_desc = &v->arch.hvm.vmx.pi_desc;
 
     /*
      * Set 'NV' field back to posted_intr_vector, so the
@@ -169,7 +169,7 @@ static void vmx_pi_unblock_vcpu(struct vcpu *v)
      */
     write_atomic(&pi_desc->nv, posted_intr_vector);
 
-    pi_blocking_list_lock = v->arch.hvm_vmx.pi_blocking.lock;
+    pi_blocking_list_lock = v->arch.hvm.vmx.pi_blocking.lock;
 
     /* Prevent the compiler from eliminating the local variable.*/
     smp_rmb();
@@ -181,14 +181,14 @@ static void vmx_pi_unblock_vcpu(struct vcpu *v)
     spin_lock_irqsave(pi_blocking_list_lock, flags);
 
     /*
-     * v->arch.hvm_vmx.pi_blocking.lock == NULL here means the vCPU
+     * v->arch.hvm.vmx.pi_blocking.lock == NULL here means the vCPU
      * was removed from the blocking list while we are acquiring the lock.
      */
-    if ( v->arch.hvm_vmx.pi_blocking.lock != NULL )
+    if ( v->arch.hvm.vmx.pi_blocking.lock != NULL )
     {
-        ASSERT(v->arch.hvm_vmx.pi_blocking.lock == pi_blocking_list_lock);
-        list_del(&v->arch.hvm_vmx.pi_blocking.list);
-        v->arch.hvm_vmx.pi_blocking.lock = NULL;
+        ASSERT(v->arch.hvm.vmx.pi_blocking.lock == pi_blocking_list_lock);
+        list_del(&v->arch.hvm.vmx.pi_blocking.list);
+        v->arch.hvm.vmx.pi_blocking.lock = NULL;
     }
 
     spin_unlock_irqrestore(pi_blocking_list_lock, flags);
@@ -237,7 +237,7 @@ void vmx_pi_desc_fixup(unsigned int cpu)
         {
             list_del(&vmx->pi_blocking.list);
             vmx->pi_blocking.lock = NULL;
-            vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx));
+            vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm.vmx));
         }
         else
         {
@@ -338,7 +338,7 @@ void vmx_pi_hooks_assign(struct domain *d)
     for_each_vcpu ( d, v )
     {
         unsigned int dest = cpu_physical_id(v->processor);
-        struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
+        struct pi_desc *pi_desc = &v->arch.hvm.vmx.pi_desc;
 
         /*
          * We don't need to update NDST if vmx_pi_switch_to()
@@ -425,9 +425,9 @@ static int vmx_vcpu_initialise(struct vcpu *v)
 {
     int rc;
 
-    spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
+    spin_lock_init(&v->arch.hvm.vmx.vmcs_lock);
 
-    INIT_LIST_HEAD(&v->arch.hvm_vmx.pi_blocking.list);
+    INIT_LIST_HEAD(&v->arch.hvm.vmx.pi_blocking.list);
 
     if ( (rc = vmx_create_vmcs(v)) != 0 )
     {
@@ -499,15 +499,15 @@ static void vmx_save_guest_msrs(struct vcpu *v)
      * We cannot cache SHADOW_GS_BASE while the VCPU runs, as it can
      * be updated at any time via SWAPGS, which we cannot trap.
      */
-    v->arch.hvm_vmx.shadow_gs = rdgsshadow();
+    v->arch.hvm.vmx.shadow_gs = rdgsshadow();
 }
 
 static void vmx_restore_guest_msrs(struct vcpu *v)
 {
-    wrgsshadow(v->arch.hvm_vmx.shadow_gs);
-    wrmsrl(MSR_STAR,           v->arch.hvm_vmx.star);
-    wrmsrl(MSR_LSTAR,          v->arch.hvm_vmx.lstar);
-    wrmsrl(MSR_SYSCALL_MASK,   v->arch.hvm_vmx.sfmask);
+    wrgsshadow(v->arch.hvm.vmx.shadow_gs);
+    wrmsrl(MSR_STAR,           v->arch.hvm.vmx.star);
+    wrmsrl(MSR_LSTAR,          v->arch.hvm.vmx.lstar);
+    wrmsrl(MSR_SYSCALL_MASK,   v->arch.hvm.vmx.sfmask);
 
     if ( cpu_has_rdtscp )
         wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
@@ -516,25 +516,25 @@ static void vmx_restore_guest_msrs(struct vcpu *v)
 void vmx_update_cpu_exec_control(struct vcpu *v)
 {
     if ( nestedhvm_vcpu_in_guestmode(v) )
-        nvmx_update_exec_control(v, v->arch.hvm_vmx.exec_control);
+        nvmx_update_exec_control(v, v->arch.hvm.vmx.exec_control);
     else
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm.vmx.exec_control);
 }
 
 void vmx_update_secondary_exec_control(struct vcpu *v)
 {
     if ( nestedhvm_vcpu_in_guestmode(v) )
         nvmx_update_secondary_exec_control(v,
-            v->arch.hvm_vmx.secondary_exec_control);
+            v->arch.hvm.vmx.secondary_exec_control);
     else
         __vmwrite(SECONDARY_VM_EXEC_CONTROL,
-                  v->arch.hvm_vmx.secondary_exec_control);
+                  v->arch.hvm.vmx.secondary_exec_control);
 }
 
 void vmx_update_exception_bitmap(struct vcpu *v)
 {
-    u32 bitmap = unlikely(v->arch.hvm_vmx.vmx_realmode)
-        ? 0xffffffffu : v->arch.hvm_vmx.exception_bitmap;
+    u32 bitmap = unlikely(v->arch.hvm.vmx.vmx_realmode)
+        ? 0xffffffffu : v->arch.hvm.vmx.exception_bitmap;
 
     if ( nestedhvm_vcpu_in_guestmode(v) )
         nvmx_update_exception_bitmap(v, bitmap);
@@ -548,9 +548,9 @@ static void vmx_cpuid_policy_changed(struct vcpu *v)
 
     if ( opt_hvm_fep ||
          (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
-        v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op);
+        v->arch.hvm.vmx.exception_bitmap |= (1U << TRAP_invalid_op);
     else
-        v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_invalid_op);
+        v->arch.hvm.vmx.exception_bitmap &= ~(1U << TRAP_invalid_op);
 
     vmx_vmcs_enter(v);
     vmx_update_exception_bitmap(v);
@@ -600,7 +600,7 @@ static void vmx_save_dr(struct vcpu *v)
 
     /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
     v->arch.hvm.flag_dr_dirty = 0;
-    v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
+    v->arch.hvm.vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
     vmx_update_cpu_exec_control(v);
 
     v->arch.debugreg[0] = read_debugreg(0);
@@ -769,21 +769,21 @@ static int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 
 static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
-    data->shadow_gs        = v->arch.hvm_vmx.shadow_gs;
+    data->shadow_gs        = v->arch.hvm.vmx.shadow_gs;
     data->msr_flags        = 0;
-    data->msr_lstar        = v->arch.hvm_vmx.lstar;
-    data->msr_star         = v->arch.hvm_vmx.star;
-    data->msr_cstar        = v->arch.hvm_vmx.cstar;
-    data->msr_syscall_mask = v->arch.hvm_vmx.sfmask;
+    data->msr_lstar        = v->arch.hvm.vmx.lstar;
+    data->msr_star         = v->arch.hvm.vmx.star;
+    data->msr_cstar        = v->arch.hvm.vmx.cstar;
+    data->msr_syscall_mask = v->arch.hvm.vmx.sfmask;
 }
 
 static void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
-    v->arch.hvm_vmx.shadow_gs = data->shadow_gs;
-    v->arch.hvm_vmx.star      = data->msr_star;
-    v->arch.hvm_vmx.lstar     = data->msr_lstar;
-    v->arch.hvm_vmx.cstar     = data->msr_cstar;
-    v->arch.hvm_vmx.sfmask    = data->msr_syscall_mask;
+    v->arch.hvm.vmx.shadow_gs = data->shadow_gs;
+    v->arch.hvm.vmx.star      = data->msr_star;
+    v->arch.hvm.vmx.lstar     = data->msr_lstar;
+    v->arch.hvm.vmx.cstar     = data->msr_cstar;
+    v->arch.hvm.vmx.sfmask    = data->msr_syscall_mask;
 }
 
 
@@ -875,10 +875,10 @@ static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
 static void vmx_fpu_enter(struct vcpu *v)
 {
     vcpu_restore_fpu_lazy(v);
-    v->arch.hvm_vmx.exception_bitmap &= ~(1u << TRAP_no_device);
+    v->arch.hvm.vmx.exception_bitmap &= ~(1u << TRAP_no_device);
     vmx_update_exception_bitmap(v);
-    v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS;
-    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+    v->arch.hvm.vmx.host_cr0 &= ~X86_CR0_TS;
+    __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
 }
 
 static void vmx_fpu_leave(struct vcpu *v)
@@ -886,10 +886,10 @@ static void vmx_fpu_leave(struct vcpu *v)
     ASSERT(!v->fpu_dirtied);
     ASSERT(read_cr0() & X86_CR0_TS);
 
-    if ( !(v->arch.hvm_vmx.host_cr0 & X86_CR0_TS) )
+    if ( !(v->arch.hvm.vmx.host_cr0 & X86_CR0_TS) )
     {
-        v->arch.hvm_vmx.host_cr0 |= X86_CR0_TS;
-        __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+        v->arch.hvm.vmx.host_cr0 |= X86_CR0_TS;
+        __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
     }
 
     /*
@@ -902,7 +902,7 @@ static void vmx_fpu_leave(struct vcpu *v)
     {
         v->arch.hvm.hw_cr[0] |= X86_CR0_TS;
         __vmwrite(GUEST_CR0, v->arch.hvm.hw_cr[0]);
-        v->arch.hvm_vmx.exception_bitmap |= (1u << TRAP_no_device);
+        v->arch.hvm.vmx.exception_bitmap |= (1u << TRAP_no_device);
         vmx_update_exception_bitmap(v);
     }
 }
@@ -1057,10 +1057,10 @@ static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
         (!(attr & (1u << 16)) << 7) | (attr & 0x7f) | ((attr >> 4) & 0xf00);
 
     /* Adjust for virtual 8086 mode */
-    if ( v->arch.hvm_vmx.vmx_realmode && seg <= x86_seg_tr 
-         && !(v->arch.hvm_vmx.vm86_segment_mask & (1u << seg)) )
+    if ( v->arch.hvm.vmx.vmx_realmode && seg <= x86_seg_tr
+         && !(v->arch.hvm.vmx.vm86_segment_mask & (1u << seg)) )
     {
-        struct segment_register *sreg = &v->arch.hvm_vmx.vm86_saved_seg[seg];
+        struct segment_register *sreg = &v->arch.hvm.vmx.vm86_saved_seg[seg];
         if ( seg == x86_seg_tr ) 
             *reg = *sreg;
         else if ( reg->base != sreg->base || seg == x86_seg_ss )
@@ -1097,10 +1097,10 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
     base = reg->base;
 
     /* Adjust CS/SS/DS/ES/FS/GS/TR for virtual 8086 mode */
-    if ( v->arch.hvm_vmx.vmx_realmode && seg <= x86_seg_tr )
+    if ( v->arch.hvm.vmx.vmx_realmode && seg <= x86_seg_tr )
     {
         /* Remember the proper contents */
-        v->arch.hvm_vmx.vm86_saved_seg[seg] = *reg;
+        v->arch.hvm.vmx.vm86_saved_seg[seg] = *reg;
         
         if ( seg == x86_seg_tr ) 
         {
@@ -1119,10 +1119,10 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
                     cmpxchg(&d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED],
                             val, val & ~VM86_TSS_UPDATED);
                 }
-                v->arch.hvm_vmx.vm86_segment_mask &= ~(1u << seg);
+                v->arch.hvm.vmx.vm86_segment_mask &= ~(1u << seg);
             }
             else
-                v->arch.hvm_vmx.vm86_segment_mask |= (1u << seg);
+                v->arch.hvm.vmx.vm86_segment_mask |= (1u << seg);
         }
         else
         {
@@ -1135,10 +1135,10 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
                 sel = base >> 4;
                 attr = vm86_ds_attr;
                 limit = 0xffff;
-                v->arch.hvm_vmx.vm86_segment_mask &= ~(1u << seg);
+                v->arch.hvm.vmx.vm86_segment_mask &= ~(1u << seg);
             }
             else 
-                v->arch.hvm_vmx.vm86_segment_mask |= (1u << seg);
+                v->arch.hvm.vmx.vm86_segment_mask |= (1u << seg);
         }
     }
 
@@ -1187,7 +1187,7 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
 
 static unsigned long vmx_get_shadow_gs_base(struct vcpu *v)
 {
-    return v->arch.hvm_vmx.shadow_gs;
+    return v->arch.hvm.vmx.shadow_gs;
 }
 
 static int vmx_set_guest_pat(struct vcpu *v, u64 gpat)
@@ -1310,9 +1310,9 @@ static void vmx_set_tsc_offset(struct vcpu *v, u64 offset, u64 at_tsc)
 static void vmx_set_rdtsc_exiting(struct vcpu *v, bool_t enable)
 {
     vmx_vmcs_enter(v);
-    v->arch.hvm_vmx.exec_control &= ~CPU_BASED_RDTSC_EXITING;
+    v->arch.hvm.vmx.exec_control &= ~CPU_BASED_RDTSC_EXITING;
     if ( enable )
-        v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
+        v->arch.hvm.vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
     vmx_update_cpu_exec_control(v);
     vmx_vmcs_exit(v);
 }
@@ -1320,10 +1320,10 @@ static void vmx_set_rdtsc_exiting(struct vcpu *v, bool_t enable)
 static void vmx_set_descriptor_access_exiting(struct vcpu *v, bool enable)
 {
     if ( enable )
-        v->arch.hvm_vmx.secondary_exec_control |=
+        v->arch.hvm.vmx.secondary_exec_control |=
             SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
     else
-        v->arch.hvm_vmx.secondary_exec_control &=
+        v->arch.hvm.vmx.secondary_exec_control &=
             ~SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
 
     vmx_vmcs_enter(v);
@@ -1432,9 +1432,9 @@ static void vmx_update_host_cr3(struct vcpu *v)
 void vmx_update_debug_state(struct vcpu *v)
 {
     if ( v->arch.hvm.debug_state_latch )
-        v->arch.hvm_vmx.exception_bitmap |= 1U << TRAP_int3;
+        v->arch.hvm.vmx.exception_bitmap |= 1U << TRAP_int3;
     else
-        v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_int3);
+        v->arch.hvm.vmx.exception_bitmap &= ~(1U << TRAP_int3);
 
     vmx_vmcs_enter(v);
     vmx_update_exception_bitmap(v);
@@ -1462,20 +1462,20 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
         if ( paging_mode_hap(v->domain) )
         {
             /* Manage GUEST_CR3 when CR0.PE=0. */
-            uint32_t old_ctls = v->arch.hvm_vmx.exec_control;
+            uint32_t old_ctls = v->arch.hvm.vmx.exec_control;
             uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
                                  CPU_BASED_CR3_STORE_EXITING);
 
-            v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
+            v->arch.hvm.vmx.exec_control &= ~cr3_ctls;
             if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
-                v->arch.hvm_vmx.exec_control |= cr3_ctls;
+                v->arch.hvm.vmx.exec_control |= cr3_ctls;
 
             /* Trap CR3 updates if CR3 memory events are enabled. */
             if ( v->domain->arch.monitor.write_ctrlreg_enabled &
                  monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3) )
-                v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
+                v->arch.hvm.vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
 
-            if ( old_ctls != v->arch.hvm_vmx.exec_control )
+            if ( old_ctls != v->arch.hvm.vmx.exec_control )
                 vmx_update_cpu_exec_control(v);
         }
 
@@ -1498,7 +1498,7 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
         realmode = !(v->arch.hvm.guest_cr[0] & X86_CR0_PE);
 
         if ( !vmx_unrestricted_guest(v) &&
-             (realmode != v->arch.hvm_vmx.vmx_realmode) )
+             (realmode != v->arch.hvm.vmx.vmx_realmode) )
         {
             enum x86_segment s;
             struct segment_register reg[x86_seg_tr + 1];
@@ -1510,7 +1510,7 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
              * the saved values we'll use when returning to prot mode. */
             for ( s = 0; s < ARRAY_SIZE(reg); s++ )
                 hvm_get_segment_register(v, s, &reg[s]);
-            v->arch.hvm_vmx.vmx_realmode = realmode;
+            v->arch.hvm.vmx.vmx_realmode = realmode;
 
             if ( realmode )
             {
@@ -1520,9 +1520,9 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
             else
             {
                 for ( s = 0; s < ARRAY_SIZE(reg); s++ )
-                    if ( !(v->arch.hvm_vmx.vm86_segment_mask & (1<<s)) )
+                    if ( !(v->arch.hvm.vmx.vm86_segment_mask & (1<<s)) )
                         hvm_set_segment_register(
-                            v, s, &v->arch.hvm_vmx.vm86_saved_seg[s]);
+                            v, s, &v->arch.hvm.vmx.vm86_saved_seg[s]);
             }
 
             vmx_update_exception_bitmap(v);
@@ -1544,7 +1544,7 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
             nvmx_set_cr_read_shadow(v, 4);
 
         v->arch.hvm.hw_cr[4] |= v->arch.hvm.guest_cr[4];
-        if ( v->arch.hvm_vmx.vmx_realmode )
+        if ( v->arch.hvm.vmx.vmx_realmode )
             v->arch.hvm.hw_cr[4] |= X86_CR4_VME;
 
         if ( !hvm_paging_enabled(v) )
@@ -1593,27 +1593,27 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
              * Update CR4 host mask to only trap when the guest tries to set
              * bits that are controlled by the hypervisor.
              */
-            v->arch.hvm_vmx.cr4_host_mask =
+            v->arch.hvm.vmx.cr4_host_mask =
                 (HVM_CR4_HOST_MASK | X86_CR4_PKE |
                  ~hvm_cr4_guest_valid_bits(v->domain, false));
 
-            v->arch.hvm_vmx.cr4_host_mask |= v->arch.hvm_vmx.vmx_realmode ?
+            v->arch.hvm.vmx.cr4_host_mask |= v->arch.hvm.vmx.vmx_realmode ?
                                              X86_CR4_VME : 0;
-            v->arch.hvm_vmx.cr4_host_mask |= !hvm_paging_enabled(v) ?
+            v->arch.hvm.vmx.cr4_host_mask |= !hvm_paging_enabled(v) ?
                                              (X86_CR4_PSE | X86_CR4_SMEP |
                                               X86_CR4_SMAP)
                                              : 0;
             if ( v->domain->arch.monitor.write_ctrlreg_enabled &
                  monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4) )
-                v->arch.hvm_vmx.cr4_host_mask |=
+                v->arch.hvm.vmx.cr4_host_mask |=
                 ~v->domain->arch.monitor.write_ctrlreg_mask[VM_EVENT_X86_CR4];
 
             if ( nestedhvm_vcpu_in_guestmode(v) )
                 /* Add the nested host mask to get the more restrictive one. */
-                v->arch.hvm_vmx.cr4_host_mask |= get_vvmcs(v,
+                v->arch.hvm.vmx.cr4_host_mask |= get_vvmcs(v,
                                                            CR4_GUEST_HOST_MASK);
 
-            __vmwrite(CR4_GUEST_HOST_MASK, v->arch.hvm_vmx.cr4_host_mask);
+            __vmwrite(CR4_GUEST_HOST_MASK, v->arch.hvm.vmx.cr4_host_mask);
         }
 
         break;
@@ -1774,8 +1774,8 @@ static void __vmx_inject_exception(int trap, int type, int error_code)
 
     /* Can't inject exceptions in virtual 8086 mode because they would 
      * use the protected-mode IDT.  Emulate at the next vmenter instead. */
-    if ( curr->arch.hvm_vmx.vmx_realmode ) 
-        curr->arch.hvm_vmx.vmx_emulate = 1;
+    if ( curr->arch.hvm.vmx.vmx_realmode )
+        curr->arch.hvm.vmx.vmx_emulate = 1;
 }
 
 void vmx_inject_extint(int trap, uint8_t source)
@@ -1989,10 +1989,10 @@ static void vmx_process_isr(int isr, struct vcpu *v)
     for ( i = 0x10; i < NR_VECTORS; ++i )
         if ( vlapic_test_vector(i, &vlapic->regs->data[APIC_IRR]) ||
              vlapic_test_vector(i, &vlapic->regs->data[APIC_ISR]) )
-            set_bit(i, v->arch.hvm_vmx.eoi_exit_bitmap);
+            set_bit(i, v->arch.hvm.vmx.eoi_exit_bitmap);
 
-    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm_vmx.eoi_exit_bitmap); ++i )
-        __vmwrite(EOI_EXIT_BITMAP(i), v->arch.hvm_vmx.eoi_exit_bitmap[i]);
+    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.vmx.eoi_exit_bitmap); ++i )
+        __vmwrite(EOI_EXIT_BITMAP(i), v->arch.hvm.vmx.eoi_exit_bitmap[i]);
 
     vmx_vmcs_exit(v);
 }
@@ -2054,23 +2054,23 @@ static void __vmx_deliver_posted_interrupt(struct vcpu *v)
 
 static void vmx_deliver_posted_intr(struct vcpu *v, u8 vector)
 {
-    if ( pi_test_and_set_pir(vector, &v->arch.hvm_vmx.pi_desc) )
+    if ( pi_test_and_set_pir(vector, &v->arch.hvm.vmx.pi_desc) )
         return;
 
-    if ( unlikely(v->arch.hvm_vmx.eoi_exitmap_changed) )
+    if ( unlikely(v->arch.hvm.vmx.eoi_exitmap_changed) )
     {
         /*
          * If EOI exitbitmap needs to changed or notification vector
          * can't be allocated, interrupt will not be injected till
          * VMEntry as it used to be.
          */
-        pi_set_on(&v->arch.hvm_vmx.pi_desc);
+        pi_set_on(&v->arch.hvm.vmx.pi_desc);
     }
     else
     {
         struct pi_desc old, new, prev;
 
-        prev.control = v->arch.hvm_vmx.pi_desc.control;
+        prev.control = v->arch.hvm.vmx.pi_desc.control;
 
         do {
             /*
@@ -2086,12 +2086,12 @@ static void vmx_deliver_posted_intr(struct vcpu *v, u8 vector)
                 return;
             }
 
-            old.control = v->arch.hvm_vmx.pi_desc.control &
+            old.control = v->arch.hvm.vmx.pi_desc.control &
                           ~((1 << POSTED_INTR_ON) | (1 << POSTED_INTR_SN));
-            new.control = v->arch.hvm_vmx.pi_desc.control |
+            new.control = v->arch.hvm.vmx.pi_desc.control |
                           (1 << POSTED_INTR_ON);
 
-            prev.control = cmpxchg(&v->arch.hvm_vmx.pi_desc.control,
+            prev.control = cmpxchg(&v->arch.hvm.vmx.pi_desc.control,
                                    old.control, new.control);
         } while ( prev.control != old.control );
 
@@ -2108,11 +2108,11 @@ static void vmx_sync_pir_to_irr(struct vcpu *v)
     unsigned int group, i;
     DECLARE_BITMAP(pending_intr, NR_VECTORS);
 
-    if ( !pi_test_and_clear_on(&v->arch.hvm_vmx.pi_desc) )
+    if ( !pi_test_and_clear_on(&v->arch.hvm.vmx.pi_desc) )
         return;
 
     for ( group = 0; group < ARRAY_SIZE(pending_intr); group++ )
-        pending_intr[group] = pi_get_pir(&v->arch.hvm_vmx.pi_desc, group);
+        pending_intr[group] = pi_get_pir(&v->arch.hvm.vmx.pi_desc, group);
 
     for_each_set_bit(i, pending_intr, NR_VECTORS)
         vlapic_set_vector(i, &vlapic->regs->data[APIC_IRR]);
@@ -2120,7 +2120,7 @@ static void vmx_sync_pir_to_irr(struct vcpu *v)
 
 static bool vmx_test_pir(const struct vcpu *v, uint8_t vec)
 {
-    return pi_test_pir(vec, &v->arch.hvm_vmx.pi_desc);
+    return pi_test_pir(vec, &v->arch.hvm.vmx.pi_desc);
 }
 
 static void vmx_handle_eoi(u8 vector)
@@ -2164,7 +2164,7 @@ static void vmx_vcpu_update_eptp(struct vcpu *v)
 
     __vmwrite(EPT_POINTER, ept->eptp);
 
-    if ( v->arch.hvm_vmx.secondary_exec_control &
+    if ( v->arch.hvm.vmx.secondary_exec_control &
          SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS )
         __vmwrite(EPTP_INDEX, vcpu_altp2m(v).p2midx);
 
@@ -2186,7 +2186,7 @@ static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
 
     if ( !d->is_dying && altp2m_active(d) )
     {
-        v->arch.hvm_vmx.secondary_exec_control |= mask;
+        v->arch.hvm.vmx.secondary_exec_control |= mask;
         __vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
         __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
 
@@ -2207,12 +2207,12 @@ static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
                 __vmwrite(EPTP_INDEX, vcpu_altp2m(v).p2midx);
             }
             else
-                v->arch.hvm_vmx.secondary_exec_control &=
+                v->arch.hvm.vmx.secondary_exec_control &=
                     ~SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS;
         }
     }
     else
-        v->arch.hvm_vmx.secondary_exec_control &= ~mask;
+        v->arch.hvm.vmx.secondary_exec_control &= ~mask;
 
     vmx_update_secondary_exec_control(v);
     vmx_vmcs_exit(v);
@@ -2379,7 +2379,7 @@ static void pi_wakeup_interrupt(struct cpu_user_regs *regs)
             list_del(&vmx->pi_blocking.list);
             ASSERT(vmx->pi_blocking.lock == lock);
             vmx->pi_blocking.lock = NULL;
-            vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx));
+            vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm.vmx));
         }
     }
 
@@ -2591,7 +2591,7 @@ static void vmx_dr_access(unsigned long exit_qualification,
         __restore_debug_registers(v);
 
     /* Allow guest direct access to DR registers */
-    v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
+    v->arch.hvm.vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
     vmx_update_cpu_exec_control(v);
 }
 
@@ -2906,19 +2906,19 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         break;
 
     case MSR_STAR:
-        *msr_content = curr->arch.hvm_vmx.star;
+        *msr_content = curr->arch.hvm.vmx.star;
         break;
 
     case MSR_LSTAR:
-        *msr_content = curr->arch.hvm_vmx.lstar;
+        *msr_content = curr->arch.hvm.vmx.lstar;
         break;
 
     case MSR_CSTAR:
-        *msr_content = curr->arch.hvm_vmx.cstar;
+        *msr_content = curr->arch.hvm.vmx.cstar;
         break;
 
     case MSR_SYSCALL_MASK:
-        *msr_content = curr->arch.hvm_vmx.sfmask;
+        *msr_content = curr->arch.hvm.vmx.sfmask;
         break;
 
     case MSR_IA32_DEBUGCTLMSR:
@@ -3047,7 +3047,7 @@ void vmx_vlapic_msr_changed(struct vcpu *v)
         return;
 
     vmx_vmcs_enter(v);
-    v->arch.hvm_vmx.secondary_exec_control &=
+    v->arch.hvm.vmx.secondary_exec_control &=
         ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
           SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
     if ( !vlapic_hw_disabled(vlapic) &&
@@ -3055,7 +3055,7 @@ void vmx_vlapic_msr_changed(struct vcpu *v)
     {
         if ( virtualize_x2apic_mode && vlapic_x2apic_mode(vlapic) )
         {
-            v->arch.hvm_vmx.secondary_exec_control |=
+            v->arch.hvm.vmx.secondary_exec_control |=
                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
             if ( cpu_has_vmx_apic_reg_virt )
             {
@@ -3075,10 +3075,10 @@ void vmx_vlapic_msr_changed(struct vcpu *v)
             }
         }
         else
-            v->arch.hvm_vmx.secondary_exec_control |=
+            v->arch.hvm.vmx.secondary_exec_control |=
                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
     }
-    if ( !(v->arch.hvm_vmx.secondary_exec_control &
+    if ( !(v->arch.hvm.vmx.secondary_exec_control &
            SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE) )
         for ( msr = MSR_IA32_APICBASE_MSR;
               msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++ )
@@ -3129,25 +3129,25 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         break;
 
     case MSR_STAR:
-        v->arch.hvm_vmx.star = msr_content;
+        v->arch.hvm.vmx.star = msr_content;
         wrmsrl(MSR_STAR, msr_content);
         break;
 
     case MSR_LSTAR:
         if ( !is_canonical_address(msr_content) )
             goto gp_fault;
-        v->arch.hvm_vmx.lstar = msr_content;
+        v->arch.hvm.vmx.lstar = msr_content;
         wrmsrl(MSR_LSTAR, msr_content);
         break;
 
     case MSR_CSTAR:
         if ( !is_canonical_address(msr_content) )
             goto gp_fault;
-        v->arch.hvm_vmx.cstar = msr_content;
+        v->arch.hvm.vmx.cstar = msr_content;
         break;
 
     case MSR_SYSCALL_MASK:
-        v->arch.hvm_vmx.sfmask = msr_content;
+        v->arch.hvm.vmx.sfmask = msr_content;
         wrmsrl(MSR_SYSCALL_MASK, msr_content);
         break;
 
@@ -3188,7 +3188,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
          * the guest won't execute correctly either.  Simply crash the domain
          * to make the failure obvious.
          */
-        if ( !(v->arch.hvm_vmx.lbr_flags & LBR_MSRS_INSERTED) &&
+        if ( !(v->arch.hvm.vmx.lbr_flags & LBR_MSRS_INSERTED) &&
              (msr_content & IA32_DEBUGCTLMSR_LBR) )
         {
             const struct lbr_info *lbr = last_branch_msr_get();
@@ -3220,11 +3220,11 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
                 }
             }
 
-            v->arch.hvm_vmx.lbr_flags |= LBR_MSRS_INSERTED;
+            v->arch.hvm.vmx.lbr_flags |= LBR_MSRS_INSERTED;
             if ( lbr_tsx_fixup_needed )
-                v->arch.hvm_vmx.lbr_flags |= LBR_FIXUP_TSX;
+                v->arch.hvm.vmx.lbr_flags |= LBR_FIXUP_TSX;
             if ( bdw_erratum_bdf14_fixup_needed )
-                v->arch.hvm_vmx.lbr_flags |= LBR_FIXUP_BDF14;
+                v->arch.hvm.vmx.lbr_flags |= LBR_FIXUP_BDF14;
         }
 
         __vmwrite(GUEST_IA32_DEBUGCTL, msr_content);
@@ -3420,7 +3420,7 @@ static void vmx_failed_vmentry(unsigned int exit_reason,
             printk("  Entry out of range\n");
         else
         {
-            msr = &curr->arch.hvm_vmx.msr_area[idx];
+            msr = &curr->arch.hvm.vmx.msr_area[idx];
 
             printk("  msr %08x val %016"PRIx64" (mbz %#x)\n",
                    msr->index, msr->data, msr->mbz);
@@ -3453,7 +3453,7 @@ void vmx_enter_realmode(struct cpu_user_regs *regs)
     /* Adjust RFLAGS to enter virtual 8086 mode with IOPL == 3.  Since
      * we have CR4.VME == 1 and our own TSS with an empty interrupt
      * redirection bitmap, all software INTs will be handled by vm86 */
-    v->arch.hvm_vmx.vm86_saved_eflags = regs->eflags;
+    v->arch.hvm.vmx.vm86_saved_eflags = regs->eflags;
     regs->eflags |= (X86_EFLAGS_VM | X86_EFLAGS_IOPL);
 }
 
@@ -3619,9 +3619,9 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
          * values to match.
          */
         __vmread(GUEST_CR4, &v->arch.hvm.hw_cr[4]);
-        v->arch.hvm.guest_cr[4] &= v->arch.hvm_vmx.cr4_host_mask;
+        v->arch.hvm.guest_cr[4] &= v->arch.hvm.vmx.cr4_host_mask;
         v->arch.hvm.guest_cr[4] |= (v->arch.hvm.hw_cr[4] &
-                                    ~v->arch.hvm_vmx.cr4_host_mask);
+                                    ~v->arch.hvm.vmx.cr4_host_mask);
 
         __vmread(GUEST_CR3, &v->arch.hvm.hw_cr[3]);
         if ( vmx_unrestricted_guest(v) || hvm_paging_enabled(v) )
@@ -3672,12 +3672,12 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
      * figure out whether it has done so and update the altp2m data.
      */
     if ( altp2m_active(v->domain) &&
-        (v->arch.hvm_vmx.secondary_exec_control &
+        (v->arch.hvm.vmx.secondary_exec_control &
         SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) )
     {
         unsigned long idx;
 
-        if ( v->arch.hvm_vmx.secondary_exec_control &
+        if ( v->arch.hvm.vmx.secondary_exec_control &
             SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS )
             __vmread(EPTP_INDEX, &idx);
         else
@@ -3719,11 +3719,11 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
         return vmx_failed_vmentry(exit_reason, regs);
 
-    if ( v->arch.hvm_vmx.vmx_realmode )
+    if ( v->arch.hvm.vmx.vmx_realmode )
     {
         /* Put RFLAGS back the way the guest wants it */
         regs->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IOPL);
-        regs->eflags |= (v->arch.hvm_vmx.vm86_saved_eflags & X86_EFLAGS_IOPL);
+        regs->eflags |= (v->arch.hvm.vmx.vm86_saved_eflags & X86_EFLAGS_IOPL);
 
         /* Unless this exit was for an interrupt, we've hit something
          * vm86 can't handle.  Try again, using the emulator. */
@@ -3736,7 +3736,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
             {
         default:
                 perfc_incr(realmode_exits);
-                v->arch.hvm_vmx.vmx_emulate = 1;
+                v->arch.hvm.vmx.vmx_emulate = 1;
                 HVMTRACE_0D(REALMODE_EMULATE);
                 return;
             }
@@ -3912,12 +3912,12 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
     case EXIT_REASON_PENDING_VIRT_INTR:
         /* Disable the interrupt window. */
-        v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+        v->arch.hvm.vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
         vmx_update_cpu_exec_control(v);
         break;
     case EXIT_REASON_PENDING_VIRT_NMI:
         /* Disable the NMI window. */
-        v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
+        v->arch.hvm.vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
         vmx_update_cpu_exec_control(v);
         break;
     case EXIT_REASON_TASK_SWITCH: {
@@ -4166,7 +4166,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     }
 
     case EXIT_REASON_MONITOR_TRAP_FLAG:
-        v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
+        v->arch.hvm.vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
         vmx_update_cpu_exec_control(v);
         if ( v->arch.hvm.single_step )
         {
@@ -4266,8 +4266,8 @@ out:
 static void lbr_tsx_fixup(void)
 {
     struct vcpu *curr = current;
-    unsigned int msr_count = curr->arch.hvm_vmx.msr_save_count;
-    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+    unsigned int msr_count = curr->arch.hvm.vmx.msr_save_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm.vmx.msr_area;
     struct vmx_msr_entry *msr;
 
     if ( (msr = vmx_find_msr(curr, lbr_from_start, VMX_MSR_GUEST)) != NULL )
@@ -4313,9 +4313,9 @@ static void lbr_fixup(void)
 {
     struct vcpu *curr = current;
 
-    if ( curr->arch.hvm_vmx.lbr_flags & LBR_FIXUP_TSX )
+    if ( curr->arch.hvm.vmx.lbr_flags & LBR_FIXUP_TSX )
         lbr_tsx_fixup();
-    if ( curr->arch.hvm_vmx.lbr_flags & LBR_FIXUP_BDF14 )
+    if ( curr->arch.hvm.vmx.lbr_flags & LBR_FIXUP_BDF14 )
         bdw_erratum_bdf14_fixup();
 }
 
@@ -4351,14 +4351,14 @@ bool vmx_vmenter_helper(const struct cpu_user_regs *regs)
         if ( !old_asid && new_asid )
         {
             /* VPID was disabled: now enabled. */
-            curr->arch.hvm_vmx.secondary_exec_control |=
+            curr->arch.hvm.vmx.secondary_exec_control |=
                 SECONDARY_EXEC_ENABLE_VPID;
             vmx_update_secondary_exec_control(curr);
         }
         else if ( old_asid && !new_asid )
         {
             /* VPID was enabled: now disabled. */
-            curr->arch.hvm_vmx.secondary_exec_control &=
+            curr->arch.hvm.vmx.secondary_exec_control &=
                 ~SECONDARY_EXEC_ENABLE_VPID;
             vmx_update_secondary_exec_control(curr);
         }
@@ -4383,7 +4383,7 @@ bool vmx_vmenter_helper(const struct cpu_user_regs *regs)
     }
 
  out:
-    if ( unlikely(curr->arch.hvm_vmx.lbr_flags & LBR_FIXUP_MASK) )
+    if ( unlikely(curr->arch.hvm.vmx.lbr_flags & LBR_FIXUP_MASK) )
         lbr_fixup();
 
     HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 5cdea47aea..0e45db83e5 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -82,7 +82,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
             gdprintk(XENLOG_ERR, "nest: allocation for vmread bitmap failed\n");
             return -ENOMEM;
         }
-        v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
+        v->arch.hvm.vmx.vmread_bitmap = vmread_bitmap;
 
         clear_domain_page(page_to_mfn(vmread_bitmap));
 
@@ -92,7 +92,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
             gdprintk(XENLOG_ERR, "nest: allocation for vmwrite bitmap failed\n");
             return -ENOMEM;
         }
-        v->arch.hvm_vmx.vmwrite_bitmap = vmwrite_bitmap;
+        v->arch.hvm.vmx.vmwrite_bitmap = vmwrite_bitmap;
 
         vw = __map_domain_page(vmwrite_bitmap);
         clear_page(vw);
@@ -138,7 +138,7 @@ void nvmx_vcpu_destroy(struct vcpu *v)
      * leak of L1 VMCS page.
      */
     if ( nvcpu->nv_n1vmcx_pa )
-        v->arch.hvm_vmx.vmcs_pa = nvcpu->nv_n1vmcx_pa;
+        v->arch.hvm.vmx.vmcs_pa = nvcpu->nv_n1vmcx_pa;
 
     if ( nvcpu->nv_n2vmcx_pa )
     {
@@ -155,15 +155,15 @@ void nvmx_vcpu_destroy(struct vcpu *v)
             xfree(item);
         }
 
-    if ( v->arch.hvm_vmx.vmread_bitmap )
+    if ( v->arch.hvm.vmx.vmread_bitmap )
     {
-        free_domheap_page(v->arch.hvm_vmx.vmread_bitmap);
-        v->arch.hvm_vmx.vmread_bitmap = NULL;
+        free_domheap_page(v->arch.hvm.vmx.vmread_bitmap);
+        v->arch.hvm.vmx.vmread_bitmap = NULL;
     }
-    if ( v->arch.hvm_vmx.vmwrite_bitmap )
+    if ( v->arch.hvm.vmx.vmwrite_bitmap )
     {
-        free_domheap_page(v->arch.hvm_vmx.vmwrite_bitmap);
-        v->arch.hvm_vmx.vmwrite_bitmap = NULL;
+        free_domheap_page(v->arch.hvm.vmx.vmwrite_bitmap);
+        v->arch.hvm.vmx.vmwrite_bitmap = NULL;
     }
 }
  
@@ -809,7 +809,7 @@ static void nvmx_purge_vvmcs(struct vcpu *v)
         hvm_unmap_guest_frame(nvcpu->nv_vvmcx, 1);
     nvcpu->nv_vvmcx = NULL;
     nvcpu->nv_vvmcxaddr = INVALID_PADDR;
-    v->arch.hvm_vmx.vmcs_shadow_maddr = 0;
+    v->arch.hvm.vmx.vmcs_shadow_maddr = 0;
     for (i=0; i<2; i++) {
         if ( nvmx->iobitmap[i] ) {
             hvm_unmap_guest_frame(nvmx->iobitmap[i], 1);
@@ -1101,8 +1101,8 @@ static void load_shadow_guest_state(struct vcpu *v)
                      (get_vvmcs(v, CR4_READ_SHADOW) & cr_gh_mask);
     __vmwrite(CR4_READ_SHADOW, cr_read_shadow);
     /* Add the nested host mask to the one set by vmx_update_guest_cr. */
-    v->arch.hvm_vmx.cr4_host_mask |= cr_gh_mask;
-    __vmwrite(CR4_GUEST_HOST_MASK, v->arch.hvm_vmx.cr4_host_mask);
+    v->arch.hvm.vmx.cr4_host_mask |= cr_gh_mask;
+    __vmwrite(CR4_GUEST_HOST_MASK, v->arch.hvm.vmx.cr4_host_mask);
 
     /* TODO: CR3 target control */
 }
@@ -1133,18 +1133,18 @@ static bool_t nvmx_vpid_enabled(const struct vcpu *v)
 
 static void nvmx_set_vmcs_pointer(struct vcpu *v, struct vmcs_struct *vvmcs)
 {
-    paddr_t vvmcs_maddr = v->arch.hvm_vmx.vmcs_shadow_maddr;
+    paddr_t vvmcs_maddr = v->arch.hvm.vmx.vmcs_shadow_maddr;
 
     __vmpclear(vvmcs_maddr);
     vvmcs->vmcs_revision_id |= VMCS_RID_TYPE_MASK;
     __vmwrite(VMCS_LINK_POINTER, vvmcs_maddr);
-    __vmwrite(VMREAD_BITMAP, page_to_maddr(v->arch.hvm_vmx.vmread_bitmap));
-    __vmwrite(VMWRITE_BITMAP, page_to_maddr(v->arch.hvm_vmx.vmwrite_bitmap));
+    __vmwrite(VMREAD_BITMAP, page_to_maddr(v->arch.hvm.vmx.vmread_bitmap));
+    __vmwrite(VMWRITE_BITMAP, page_to_maddr(v->arch.hvm.vmx.vmwrite_bitmap));
 }
 
 static void nvmx_clear_vmcs_pointer(struct vcpu *v, struct vmcs_struct *vvmcs)
 {
-    paddr_t vvmcs_maddr = v->arch.hvm_vmx.vmcs_shadow_maddr;
+    paddr_t vvmcs_maddr = v->arch.hvm.vmx.vmcs_shadow_maddr;
 
     __vmpclear(vvmcs_maddr);
     vvmcs->vmcs_revision_id &= ~VMCS_RID_TYPE_MASK;
@@ -1159,7 +1159,7 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     unsigned long lm_l1, lm_l2;
 
-    vmx_vmcs_switch(v->arch.hvm_vmx.vmcs_pa, nvcpu->nv_n2vmcx_pa);
+    vmx_vmcs_switch(v->arch.hvm.vmx.vmcs_pa, nvcpu->nv_n2vmcx_pa);
 
     nestedhvm_vcpu_enter_guestmode(v);
     nvcpu->nv_vmentry_pending = 0;
@@ -1197,7 +1197,7 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
     regs->rflags = get_vvmcs(v, GUEST_RFLAGS);
 
     /* updating host cr0 to sync TS bit */
-    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+    __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
 
     /* Setup virtual ETP for L2 guest*/
     if ( nestedhvm_paging_mode_hap(v) )
@@ -1234,7 +1234,7 @@ static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
     if ( !(__n2_exec_control(v) & CPU_BASED_CR3_LOAD_EXITING) )
         shadow_to_vvmcs(v, GUEST_CR3);
 
-    if ( v->arch.hvm_vmx.cr4_host_mask != ~0UL )
+    if ( v->arch.hvm.vmx.cr4_host_mask != ~0UL )
         /* Only need to update nested GUEST_CR4 if not all bits are trapped. */
         set_vvmcs(v, GUEST_CR4, v->arch.hvm.guest_cr[4]);
 }
@@ -1375,7 +1375,7 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
     /* This will clear current pCPU bit in p2m->dirty_cpumask */
     np2m_schedule(NP2M_SCHEDLE_OUT);
 
-    vmx_vmcs_switch(v->arch.hvm_vmx.vmcs_pa, nvcpu->nv_n1vmcx_pa);
+    vmx_vmcs_switch(v->arch.hvm.vmx.vmcs_pa, nvcpu->nv_n1vmcx_pa);
 
     nestedhvm_vcpu_exit_guestmode(v);
     nvcpu->nv_vmexit_pending = 0;
@@ -1404,7 +1404,7 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
     regs->rflags = X86_EFLAGS_MBS;
 
     /* updating host cr0 to sync TS bit */
-    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+    __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
 
     if ( cpu_has_vmx_virtual_intr_delivery )
         nvmx_update_apicv(v);
@@ -1511,12 +1511,12 @@ int nvmx_handle_vmxon(struct cpu_user_regs *regs)
      * `fork' the host vmcs to shadow_vmcs
      * vmcs_lock is not needed since we are on current
      */
-    nvcpu->nv_n1vmcx_pa = v->arch.hvm_vmx.vmcs_pa;
-    __vmpclear(v->arch.hvm_vmx.vmcs_pa);
+    nvcpu->nv_n1vmcx_pa = v->arch.hvm.vmx.vmcs_pa;
+    __vmpclear(v->arch.hvm.vmx.vmcs_pa);
     copy_domain_page(_mfn(PFN_DOWN(nvcpu->nv_n2vmcx_pa)),
-                     _mfn(PFN_DOWN(v->arch.hvm_vmx.vmcs_pa)));
-    __vmptrld(v->arch.hvm_vmx.vmcs_pa);
-    v->arch.hvm_vmx.launched = 0;
+                     _mfn(PFN_DOWN(v->arch.hvm.vmx.vmcs_pa)));
+    __vmptrld(v->arch.hvm.vmx.vmcs_pa);
+    v->arch.hvm.vmx.launched = 0;
     vmsucceed(regs);
 
     return X86EMUL_OKAY;
@@ -1636,7 +1636,7 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs)
     }
 
     launched = vvmcs_launched(&nvmx->launched_list,
-                              PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr));
+                              PFN_DOWN(v->arch.hvm.vmx.vmcs_shadow_maddr));
     if ( !launched )
     {
         vmfail_valid(regs, VMX_INSN_VMRESUME_NONLAUNCHED_VMCS);
@@ -1670,7 +1670,7 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
     }
 
     launched = vvmcs_launched(&nvmx->launched_list,
-                              PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr));
+                              PFN_DOWN(v->arch.hvm.vmx.vmcs_shadow_maddr));
     if ( launched )
     {
         vmfail_valid(regs, VMX_INSN_VMLAUNCH_NONCLEAR_VMCS);
@@ -1681,7 +1681,7 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
         if ( rc == X86EMUL_OKAY )
         {
             if ( set_vvmcs_launched(&nvmx->launched_list,
-                                    PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr)) < 0 )
+                                    PFN_DOWN(v->arch.hvm.vmx.vmcs_shadow_maddr)) < 0 )
                 return X86EMUL_UNHANDLEABLE;
         }
     }
@@ -1732,7 +1732,7 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
                 }
                 nvcpu->nv_vvmcx = vvmcx;
                 nvcpu->nv_vvmcxaddr = gpa;
-                v->arch.hvm_vmx.vmcs_shadow_maddr =
+                v->arch.hvm.vmx.vmcs_shadow_maddr =
                     mfn_to_maddr(domain_page_map_to_mfn(vvmcx));
             }
             else
@@ -1806,7 +1806,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
         if ( cpu_has_vmx_vmcs_shadowing )
             nvmx_clear_vmcs_pointer(v, nvcpu->nv_vvmcx);
         clear_vvmcs_launched(&nvmx->launched_list,
-                             PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr));
+                             PFN_DOWN(v->arch.hvm.vmx.vmcs_shadow_maddr));
         nvmx_purge_vvmcs(v);
     }
     else 
@@ -2041,7 +2041,7 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
     case MSR_IA32_VMX_BASIC:
     {
         const struct vmcs_struct *vmcs =
-            map_domain_page(_mfn(PFN_DOWN(v->arch.hvm_vmx.vmcs_pa)));
+            map_domain_page(_mfn(PFN_DOWN(v->arch.hvm.vmx.vmcs_pa)));
 
         data = (host_data & (~0ul << 32)) |
                (vmcs->vmcs_revision_id & 0x7fffffff);
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 14b593923b..1ff4f14ae4 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -643,7 +643,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
         struct vcpu *v;
 
         for_each_vcpu ( p2m->domain, v )
-            v->arch.hvm_vmx.ept_spurious_misconfig = 1;
+            v->arch.hvm.vmx.ept_spurious_misconfig = 1;
     }
 
     return rc;
@@ -658,9 +658,9 @@ bool_t ept_handle_misconfig(uint64_t gpa)
 
     p2m_lock(p2m);
 
-    spurious = curr->arch.hvm_vmx.ept_spurious_misconfig;
+    spurious = curr->arch.hvm.vmx.ept_spurious_misconfig;
     rc = resolve_misconfig(p2m, PFN_DOWN(gpa));
-    curr->arch.hvm_vmx.ept_spurious_misconfig = 0;
+    curr->arch.hvm.vmx.ept_spurious_misconfig = 0;
 
     p2m_unlock(p2m);
 
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 6693508160..052228cdda 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -82,14 +82,14 @@ void __dummy__(void)
     DEFINE(_VGCF_syscall_disables_events,  _VGCF_syscall_disables_events);
     BLANK();
 
-    OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
-    OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
+    OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm.svm.vmcb_pa);
+    OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm.svm.vmcb);
     BLANK();
 
-    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
-    OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm_vmx.vmx_realmode);
-    OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm_vmx.vmx_emulate);
-    OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm_vmx.vm86_segment_mask);
+    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm.vmx.launched);
+    OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm.vmx.vmx_realmode);
+    OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm.vmx.vmx_emulate);
+    OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm.vmx.vm86_segment_mask);
     OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm.guest_cr[2]);
     BLANK();
 
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index bab3aa349a..a5668e6495 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -439,7 +439,7 @@ int pt_irq_create_bind(
 
         /* Use interrupt posting if it is supported. */
         if ( iommu_intpost )
-            pi_update_irte(vcpu ? &vcpu->arch.hvm_vmx.pi_desc : NULL,
+            pi_update_irte(vcpu ? &vcpu->arch.hvm.vmx.pi_desc : NULL,
                            info, pirq_dpci->gmsi.gvec);
 
         if ( pt_irq_bind->u.msi.gflags & XEN_DOMCTL_VMSI_X86_UNMASKED )
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 68bbf3336b..c7cdf974bf 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -606,10 +606,6 @@ struct guest_memory_policy
 void update_guest_memory_policy(struct vcpu *v,
                                 struct guest_memory_policy *policy);
 
-/* Shorthands to improve code legibility. */
-#define hvm_vmx         hvm.u.vmx
-#define hvm_svm         hvm.u.svm
-
 bool update_runstate_area(struct vcpu *);
 bool update_secondary_system_time(struct vcpu *,
                                   struct vcpu_time_info *);
diff --git a/xen/include/asm-x86/hvm/svm/asid.h b/xen/include/asm-x86/hvm/svm/asid.h
index d3a144cb6b..60cbb7b881 100644
--- a/xen/include/asm-x86/hvm/svm/asid.h
+++ b/xen/include/asm-x86/hvm/svm/asid.h
@@ -29,7 +29,7 @@ static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
 {
 #if 0
     /* Optimization? */
-    svm_invlpga(g_vaddr, v->arch.hvm_svm.vmcb->guest_asid);
+    svm_invlpga(g_vaddr, v->arch.hvm.svm.vmcb->guest_asid);
 #endif
 
     /* Safe fallback. Take a new ASID. */
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index c8d0a4e63f..c663155b4b 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -178,7 +178,7 @@ struct hvm_vcpu {
     union {
         struct vmx_vcpu vmx;
         struct svm_vcpu svm;
-    } u;
+    };
 
     struct tasklet      assert_evtchn_irq_tasklet;
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index f964a951ea..76dd04a72d 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -311,7 +311,7 @@ extern u64 vmx_ept_vpid_cap;
 #define cpu_has_vmx_unrestricted_guest \
     (vmx_secondary_exec_control & SECONDARY_EXEC_UNRESTRICTED_GUEST)
 #define vmx_unrestricted_guest(v)               \
-    ((v)->arch.hvm_vmx.secondary_exec_control & \
+    ((v)->arch.hvm.vmx.secondary_exec_control & \
      SECONDARY_EXEC_UNRESTRICTED_GUEST)
 #define cpu_has_vmx_ple \
     (vmx_secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
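
A minimal, hypothetical C sketch (not Xen code; structure and field contents
are illustrative only) of the access pattern the anonymous union enables --
the vendor state is reached directly as hvm.vmx / hvm.svm, with no #define
shorthand obscuring the data hierarchy:

#include <stdio.h>

struct vmx_vcpu { unsigned long shadow_gs; };
struct svm_vcpu { unsigned long vmcb_pa; };

struct hvm_vcpu {
    union {                 /* anonymous union: members share hvm_vcpu's scope */
        struct vmx_vcpu vmx;
        struct svm_vcpu svm;
    };
};

struct arch_vcpu { struct hvm_vcpu hvm; };

int main(void)
{
    struct arch_vcpu arch = { 0 };

    arch.hvm.vmx.shadow_gs = 0x1234;    /* previously spelled arch.hvm_vmx.shadow_gs */
    printf("%#lx\n", arch.hvm.vmx.shadow_gs);
    return 0;
}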
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

