
[Xen-changelog] [xen staging] x86/pv: Rename v->arch.pv_vcpu to v->arch.pv



commit fc5e7213f4f84b28c0557c8dbe16573f76932866
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Aug 28 15:50:27 2018 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Aug 30 10:36:01 2018 +0100

    x86/pv: Rename v->arch.pv_vcpu to v->arch.pv
    
    The trailing _vcpu suffix is redundant, but adds to code volume.  Drop it.
    
    Reflow lines as appropriate, and switch to using the new XFREE/etc wrappers
    where applicable.
    
    No functional change.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/cpu/amd.c              |  2 +-
 xen/arch/x86/cpu/intel.c            |  2 +-
 xen/arch/x86/cpuid.c                |  6 +--
 xen/arch/x86/domain.c               | 88 ++++++++++++++++++-------------------
 xen/arch/x86/domain_page.c          |  6 +--
 xen/arch/x86/domctl.c               | 76 ++++++++++++++++----------------
 xen/arch/x86/i387.c                 |  2 +-
 xen/arch/x86/mm.c                   | 10 ++---
 xen/arch/x86/physdev.c              |  9 ++--
 xen/arch/x86/pv/callback.c          | 42 +++++++++---------
 xen/arch/x86/pv/descriptor-tables.c | 18 ++++----
 xen/arch/x86/pv/dom0_build.c        |  4 +-
 xen/arch/x86/pv/domain.c            | 30 ++++++-------
 xen/arch/x86/pv/emul-gate-op.c      |  4 +-
 xen/arch/x86/pv/emul-priv-op.c      | 43 +++++++++---------
 xen/arch/x86/pv/iret.c              | 10 ++---
 xen/arch/x86/pv/misc-hypercalls.c   |  4 +-
 xen/arch/x86/pv/mm.c                | 10 ++---
 xen/arch/x86/pv/traps.c             | 10 ++---
 xen/arch/x86/time.c                 |  2 +-
 xen/arch/x86/traps.c                | 25 +++++------
 xen/arch/x86/x86_64/asm-offsets.c   | 37 +++++++---------
 xen/arch/x86/x86_64/entry.S         |  2 +-
 xen/arch/x86/x86_64/mm.c            | 10 ++---
 xen/arch/x86/x86_64/traps.c         | 10 ++---
 xen/arch/x86/x86_emulate.c          |  2 +-
 xen/include/asm-x86/domain.h        |  2 +-
 xen/include/asm-x86/ldt.h           |  2 +-
 xen/include/asm-x86/pv/traps.h      |  2 +-
 29 files changed, 229 insertions(+), 241 deletions(-)
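
A note on the "XFREE/etc wrappers" referred to in the commit message: XFREE()
frees an xmalloc()'d pointer and then nulls it, which is what lets the
xen/arch/x86/pv/domain.c hunk below collapse an xfree()-plus-NULL pair into a
single statement. A minimal sketch of the idiom, assuming the usual do/while
macro shape (the real definition lives alongside xfree() in Xen's xmalloc
header):

    /* Sketch only: free an xmalloc()'d pointer and clear it afterwards. */
    #define XFREE(p) do { \
        xfree(p);         \
        (p) = NULL;       \
    } while ( false )

    /* Before (as removed in the hunk below): */
    xfree(v->arch.pv_vcpu.trap_ctxt);
    v->arch.pv_vcpu.trap_ctxt = NULL;

    /* After (as added in the hunk below): */
    XFREE(v->arch.pv.trap_ctxt);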

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index e0ee11419b..c394c1c2ec 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -220,7 +220,7 @@ static void amd_ctxt_switch_masking(const struct vcpu *next)
                 * kernel.
                 */
                if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
-                   !(next->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE))
+                   !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE))
                val &= ~((uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE) << 32);
 
                if (unlikely(these_masks->_1cd != val)) {
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 8c375c80f3..65fa3d611f 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -131,7 +131,7 @@ static void intel_ctxt_switch_masking(const struct vcpu *next)
                 * kernel.
                 */
                if (next && is_pv_vcpu(next) && !is_idle_vcpu(next) &&
-                   !(next->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE))
+                   !(next->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE))
                        val &= ~(uint64_t)cpufeat_mask(X86_FEATURE_OSXSAVE);
 
                if (unlikely(these_masks->_1cd != val)) {
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 88694ede8e..24366ea35c 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -841,7 +841,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
              *
              * Architecturally, the correct code here is simply:
              *
-             *   if ( v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE )
+             *   if ( v->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE )
              *       c |= cpufeat_mask(X86_FEATURE_OSXSAVE);
              *
              * However because of bugs in Xen (before c/s bd19080b, Nov 2010,
@@ -887,7 +887,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
              *    #UD or #GP is currently being serviced.
              */
             /* OSXSAVE clear in policy.  Fast-forward CR4 back in. */
-            if ( (v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE) ||
+            if ( (v->arch.pv.ctrlreg[4] & X86_CR4_OSXSAVE) ||
                  (regs->entry_vector == TRAP_invalid_op &&
                   guest_kernel_mode(v, regs) &&
                   (read_cr4() & X86_CR4_OSXSAVE)) )
@@ -959,7 +959,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
         case 0:
             /* OSPKE clear in policy.  Fast-forward CR4 back in. */
             if ( (is_pv_domain(d)
-                  ? v->arch.pv_vcpu.ctrlreg[4]
+                  ? v->arch.pv.ctrlreg[4]
                   : v->arch.hvm_vcpu.guest_cr[4]) & X86_CR4_PKE )
                 res->c |= cpufeat_mask(X86_FEATURE_OSPKE);
             break;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 8c7ddf55f5..4cdcd5d64c 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -849,7 +849,7 @@ int arch_set_info_guest(
     {
         memcpy(&v->arch.user_regs, &c.nat->user_regs, sizeof(c.nat->user_regs));
         if ( is_pv_domain(d) )
-            memcpy(v->arch.pv_vcpu.trap_ctxt, c.nat->trap_ctxt,
+            memcpy(v->arch.pv.trap_ctxt, c.nat->trap_ctxt,
                    sizeof(c.nat->trap_ctxt));
     }
     else
@@ -858,7 +858,7 @@ int arch_set_info_guest(
         if ( is_pv_domain(d) )
         {
             for ( i = 0; i < ARRAY_SIZE(c.cmp->trap_ctxt); ++i )
-                XLAT_trap_info(v->arch.pv_vcpu.trap_ctxt + i,
+                XLAT_trap_info(v->arch.pv.trap_ctxt + i,
                                c.cmp->trap_ctxt + i);
         }
     }
@@ -873,7 +873,7 @@ int arch_set_info_guest(
     }
 
     /* IOPL privileges are virtualised. */
-    v->arch.pv_vcpu.iopl = v->arch.user_regs.eflags & X86_EFLAGS_IOPL;
+    v->arch.pv.iopl = v->arch.user_regs.eflags & X86_EFLAGS_IOPL;
     v->arch.user_regs.eflags &= ~X86_EFLAGS_IOPL;
 
     /* Ensure real hardware interrupts are enabled. */
@@ -884,8 +884,8 @@ int arch_set_info_guest(
         if ( !compat && !(flags & VGCF_in_kernel) && !c.nat->ctrlreg[1] )
             return -EINVAL;
 
-        v->arch.pv_vcpu.ldt_base = c(ldt_base);
-        v->arch.pv_vcpu.ldt_ents = c(ldt_ents);
+        v->arch.pv.ldt_base = c(ldt_base);
+        v->arch.pv.ldt_ents = c(ldt_ents);
     }
     else
     {
@@ -910,47 +910,47 @@ int arch_set_info_guest(
             fail = compat_pfn_to_cr3(pfn) != c.cmp->ctrlreg[3];
         }
 
-        for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames); ++i )
-            fail |= v->arch.pv_vcpu.gdt_frames[i] != c(gdt_frames[i]);
-        fail |= v->arch.pv_vcpu.gdt_ents != c(gdt_ents);
+        for ( i = 0; i < ARRAY_SIZE(v->arch.pv.gdt_frames); ++i )
+            fail |= v->arch.pv.gdt_frames[i] != c(gdt_frames[i]);
+        fail |= v->arch.pv.gdt_ents != c(gdt_ents);
 
-        fail |= v->arch.pv_vcpu.ldt_base != c(ldt_base);
-        fail |= v->arch.pv_vcpu.ldt_ents != c(ldt_ents);
+        fail |= v->arch.pv.ldt_base != c(ldt_base);
+        fail |= v->arch.pv.ldt_ents != c(ldt_ents);
 
         if ( fail )
            return -EOPNOTSUPP;
     }
 
-    v->arch.pv_vcpu.kernel_ss = c(kernel_ss);
-    v->arch.pv_vcpu.kernel_sp = c(kernel_sp);
-    for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.ctrlreg); ++i )
-        v->arch.pv_vcpu.ctrlreg[i] = c(ctrlreg[i]);
+    v->arch.pv.kernel_ss = c(kernel_ss);
+    v->arch.pv.kernel_sp = c(kernel_sp);
+    for ( i = 0; i < ARRAY_SIZE(v->arch.pv.ctrlreg); ++i )
+        v->arch.pv.ctrlreg[i] = c(ctrlreg[i]);
 
-    v->arch.pv_vcpu.event_callback_eip = c(event_callback_eip);
-    v->arch.pv_vcpu.failsafe_callback_eip = c(failsafe_callback_eip);
+    v->arch.pv.event_callback_eip = c(event_callback_eip);
+    v->arch.pv.failsafe_callback_eip = c(failsafe_callback_eip);
     if ( !compat )
     {
-        v->arch.pv_vcpu.syscall_callback_eip = c.nat->syscall_callback_eip;
+        v->arch.pv.syscall_callback_eip = c.nat->syscall_callback_eip;
         /* non-nul selector kills fs_base */
-        v->arch.pv_vcpu.fs_base =
+        v->arch.pv.fs_base =
             !(v->arch.user_regs.fs & ~3) ? c.nat->fs_base : 0;
-        v->arch.pv_vcpu.gs_base_kernel = c.nat->gs_base_kernel;
+        v->arch.pv.gs_base_kernel = c.nat->gs_base_kernel;
         /* non-nul selector kills gs_base_user */
-        v->arch.pv_vcpu.gs_base_user =
+        v->arch.pv.gs_base_user =
             !(v->arch.user_regs.gs & ~3) ? c.nat->gs_base_user : 0;
     }
     else
     {
-        v->arch.pv_vcpu.event_callback_cs = c(event_callback_cs);
-        v->arch.pv_vcpu.failsafe_callback_cs = c(failsafe_callback_cs);
+        v->arch.pv.event_callback_cs = c(event_callback_cs);
+        v->arch.pv.failsafe_callback_cs = c(failsafe_callback_cs);
     }
 
     /* Only CR0.TS is modifiable by guest or admin. */
-    v->arch.pv_vcpu.ctrlreg[0] &= X86_CR0_TS;
-    v->arch.pv_vcpu.ctrlreg[0] |= read_cr0() & ~X86_CR0_TS;
+    v->arch.pv.ctrlreg[0] &= X86_CR0_TS;
+    v->arch.pv.ctrlreg[0] |= read_cr0() & ~X86_CR0_TS;
 
-    cr4 = v->arch.pv_vcpu.ctrlreg[4];
-    v->arch.pv_vcpu.ctrlreg[4] = cr4 ? pv_guest_cr4_fixup(v, cr4) :
+    cr4 = v->arch.pv.ctrlreg[4];
+    v->arch.pv.ctrlreg[4] = cr4 ? pv_guest_cr4_fixup(v, cr4) :
         real_cr4_to_pv_guest_cr4(mmu_cr4_features);
 
     memset(v->arch.debugreg, 0, sizeof(v->arch.debugreg));
@@ -1012,10 +1012,10 @@ int arch_set_info_guest(
         rc = (int)pv_set_gdt(v, c.nat->gdt_frames, c.nat->gdt_ents);
     else
     {
-        unsigned long gdt_frames[ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames)];
+        unsigned long gdt_frames[ARRAY_SIZE(v->arch.pv.gdt_frames)];
         unsigned int nr_frames = DIV_ROUND_UP(c.cmp->gdt_ents, 512);
 
-        if ( nr_frames > ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames) )
+        if ( nr_frames > ARRAY_SIZE(v->arch.pv.gdt_frames) )
             return -EINVAL;
 
         for ( i = 0; i < nr_frames; ++i )
@@ -1319,20 +1319,20 @@ static void load_segments(struct vcpu *n)
     if ( !is_pv_32bit_vcpu(n) )
     {
         /* This can only be non-zero if selector is NULL. */
-        if ( n->arch.pv_vcpu.fs_base | (dirty_segment_mask & DIRTY_FS_BASE) )
-            wrfsbase(n->arch.pv_vcpu.fs_base);
+        if ( n->arch.pv.fs_base | (dirty_segment_mask & DIRTY_FS_BASE) )
+            wrfsbase(n->arch.pv.fs_base);
 
         /*
          * Most kernels have non-zero GS base, so don't bother testing.
          * (For old AMD hardware this is also a serialising instruction,
          * avoiding erratum #88.)
          */
-        wrgsshadow(n->arch.pv_vcpu.gs_base_kernel);
+        wrgsshadow(n->arch.pv.gs_base_kernel);
 
         /* This can only be non-zero if selector is NULL. */
-        if ( n->arch.pv_vcpu.gs_base_user |
+        if ( n->arch.pv.gs_base_user |
              (dirty_segment_mask & DIRTY_GS_BASE) )
-            wrgsbase(n->arch.pv_vcpu.gs_base_user);
+            wrgsbase(n->arch.pv.gs_base_user);
 
         /* If in kernel mode then switch the GS bases around. */
         if ( (n->arch.flags & TF_kernel_mode) )
@@ -1341,7 +1341,7 @@ static void load_segments(struct vcpu *n)
 
     if ( unlikely(!all_segs_okay) )
     {
-        struct pv_vcpu *pv = &n->arch.pv_vcpu;
+        struct pv_vcpu *pv = &n->arch.pv;
         struct cpu_user_regs *regs = guest_cpu_user_regs();
         unsigned long *rsp =
             (unsigned long *)(((n->arch.flags & TF_kernel_mode)
@@ -1352,7 +1352,7 @@ static void load_segments(struct vcpu *n)
         rflags  = regs->rflags & ~(X86_EFLAGS_IF|X86_EFLAGS_IOPL);
         rflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;
         if ( VM_ASSIST(n->domain, architectural_iopl) )
-            rflags |= n->arch.pv_vcpu.iopl;
+            rflags |= n->arch.pv.iopl;
 
         if ( is_pv_32bit_vcpu(n) )
         {
@@ -1450,11 +1450,11 @@ static void save_segments(struct vcpu *v)
 
     if ( cpu_has_fsgsbase && !is_pv_32bit_vcpu(v) )
     {
-        v->arch.pv_vcpu.fs_base = __rdfsbase();
+        v->arch.pv.fs_base = __rdfsbase();
         if ( v->arch.flags & TF_kernel_mode )
-            v->arch.pv_vcpu.gs_base_kernel = __rdgsbase();
+            v->arch.pv.gs_base_kernel = __rdgsbase();
         else
-            v->arch.pv_vcpu.gs_base_user = __rdgsbase();
+            v->arch.pv.gs_base_user = __rdgsbase();
     }
 
     if ( regs->ds )
@@ -1468,9 +1468,9 @@ static void save_segments(struct vcpu *v)
         dirty_segment_mask |= DIRTY_FS;
         /* non-nul selector kills fs_base */
         if ( regs->fs & ~3 )
-            v->arch.pv_vcpu.fs_base = 0;
+            v->arch.pv.fs_base = 0;
     }
-    if ( v->arch.pv_vcpu.fs_base )
+    if ( v->arch.pv.fs_base )
         dirty_segment_mask |= DIRTY_FS_BASE;
 
     if ( regs->gs || is_pv_32bit_vcpu(v) )
@@ -1478,10 +1478,10 @@ static void save_segments(struct vcpu *v)
         dirty_segment_mask |= DIRTY_GS;
         /* non-nul selector kills gs_base_user */
         if ( regs->gs & ~3 )
-            v->arch.pv_vcpu.gs_base_user = 0;
+            v->arch.pv.gs_base_user = 0;
     }
-    if ( v->arch.flags & TF_kernel_mode ? v->arch.pv_vcpu.gs_base_kernel
-                                        : v->arch.pv_vcpu.gs_base_user )
+    if ( v->arch.flags & TF_kernel_mode ? v->arch.pv.gs_base_kernel
+                                        : v->arch.pv.gs_base_user )
         dirty_segment_mask |= DIRTY_GS_BASE;
 
     this_cpu(dirty_segment_mask) = dirty_segment_mask;
@@ -1571,7 +1571,7 @@ static void _update_runstate_area(struct vcpu *v)
 {
     if ( !update_runstate_area(v) && is_pv_vcpu(v) &&
          !(v->arch.flags & TF_kernel_mode) )
-        v->arch.pv_vcpu.need_update_runstate_area = 1;
+        v->arch.pv.need_update_runstate_area = 1;
 }
 
 static inline bool need_full_gdt(const struct domain *d)
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 735f65ada7..4a07cfb18e 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -86,7 +86,7 @@ void *map_domain_page(mfn_t mfn)
         return mfn_to_virt(mfn_x(mfn));
 
     dcache = &v->domain->arch.pv.mapcache;
-    vcache = &v->arch.pv_vcpu.mapcache;
+    vcache = &v->arch.pv.mapcache;
     if ( !dcache->inuse )
         return mfn_to_virt(mfn_x(mfn));
 
@@ -194,7 +194,7 @@ void unmap_domain_page(const void *ptr)
 
     idx = PFN_DOWN(va - MAPCACHE_VIRT_START);
     mfn = l1e_get_pfn(MAPCACHE_L1ENT(idx));
-    hashent = &v->arch.pv_vcpu.mapcache.hash[MAPHASH_HASHFN(mfn)];
+    hashent = &v->arch.pv.mapcache.hash[MAPHASH_HASHFN(mfn)];
 
     local_irq_save(flags);
 
@@ -293,7 +293,7 @@ int mapcache_vcpu_init(struct vcpu *v)
     BUILD_BUG_ON(MAPHASHENT_NOTINUSE < MAPCACHE_ENTRIES);
     for ( i = 0; i < MAPHASH_ENTRIES; i++ )
     {
-        struct vcpu_maphash_entry *hashent = &v->arch.pv_vcpu.mapcache.hash[i];
+        struct vcpu_maphash_entry *hashent = &v->arch.pv.mapcache.hash[i];
 
         hashent->mfn = ~0UL; /* never valid to map */
         hashent->idx = MAPHASHENT_NOTINUSE;
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index e27e971a6d..fdbcce0db2 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -856,17 +856,17 @@ long arch_do_domctl(
             if ( is_pv_domain(d) )
             {
                 evc->sysenter_callback_cs      =
-                    v->arch.pv_vcpu.sysenter_callback_cs;
+                    v->arch.pv.sysenter_callback_cs;
                 evc->sysenter_callback_eip     =
-                    v->arch.pv_vcpu.sysenter_callback_eip;
+                    v->arch.pv.sysenter_callback_eip;
                 evc->sysenter_disables_events  =
-                    v->arch.pv_vcpu.sysenter_disables_events;
+                    v->arch.pv.sysenter_disables_events;
                 evc->syscall32_callback_cs     =
-                    v->arch.pv_vcpu.syscall32_callback_cs;
+                    v->arch.pv.syscall32_callback_cs;
                 evc->syscall32_callback_eip    =
-                    v->arch.pv_vcpu.syscall32_callback_eip;
+                    v->arch.pv.syscall32_callback_eip;
                 evc->syscall32_disables_events =
-                    v->arch.pv_vcpu.syscall32_disables_events;
+                    v->arch.pv.syscall32_disables_events;
             }
             else
             {
@@ -900,18 +900,18 @@ long arch_do_domctl(
                     break;
                 domain_pause(d);
                 fixup_guest_code_selector(d, evc->sysenter_callback_cs);
-                v->arch.pv_vcpu.sysenter_callback_cs      =
+                v->arch.pv.sysenter_callback_cs =
                     evc->sysenter_callback_cs;
-                v->arch.pv_vcpu.sysenter_callback_eip     =
+                v->arch.pv.sysenter_callback_eip =
                     evc->sysenter_callback_eip;
-                v->arch.pv_vcpu.sysenter_disables_events  =
+                v->arch.pv.sysenter_disables_events =
                     evc->sysenter_disables_events;
                 fixup_guest_code_selector(d, evc->syscall32_callback_cs);
-                v->arch.pv_vcpu.syscall32_callback_cs     =
+                v->arch.pv.syscall32_callback_cs =
                     evc->syscall32_callback_cs;
-                v->arch.pv_vcpu.syscall32_callback_eip    =
+                v->arch.pv.syscall32_callback_eip =
                     evc->syscall32_callback_eip;
-                v->arch.pv_vcpu.syscall32_disables_events =
+                v->arch.pv.syscall32_disables_events =
                     evc->syscall32_disables_events;
             }
             else if ( (evc->sysenter_callback_cs & ~3) ||
@@ -1330,12 +1330,12 @@ long arch_do_domctl(
 
                 if ( boot_cpu_has(X86_FEATURE_DBEXT) )
                 {
-                    if ( v->arch.pv_vcpu.dr_mask[0] )
+                    if ( v->arch.pv.dr_mask[0] )
                     {
                         if ( i < vmsrs->msr_count && !ret )
                         {
                             msr.index = MSR_AMD64_DR0_ADDRESS_MASK;
-                            msr.value = v->arch.pv_vcpu.dr_mask[0];
+                            msr.value = v->arch.pv.dr_mask[0];
                            if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
                                 ret = -EFAULT;
                         }
@@ -1344,12 +1344,12 @@ long arch_do_domctl(
 
                     for ( j = 0; j < 3; ++j )
                     {
-                        if ( !v->arch.pv_vcpu.dr_mask[1 + j] )
+                        if ( !v->arch.pv.dr_mask[1 + j] )
                             continue;
                         if ( i < vmsrs->msr_count && !ret )
                         {
                             msr.index = MSR_AMD64_DR1_ADDRESS_MASK + j;
-                            msr.value = v->arch.pv_vcpu.dr_mask[1 + j];
+                            msr.value = v->arch.pv.dr_mask[1 + j];
                            if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
                                 ret = -EFAULT;
                         }
@@ -1394,7 +1394,7 @@ long arch_do_domctl(
                     if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
                          (msr.value >> 32) )
                         break;
-                    v->arch.pv_vcpu.dr_mask[0] = msr.value;
+                    v->arch.pv.dr_mask[0] = msr.value;
                     continue;
 
                 case MSR_AMD64_DR1_ADDRESS_MASK ...
@@ -1403,7 +1403,7 @@ long arch_do_domctl(
                          (msr.value >> 32) )
                         break;
                     msr.index -= MSR_AMD64_DR1_ADDRESS_MASK - 1;
-                    v->arch.pv_vcpu.dr_mask[msr.index] = msr.value;
+                    v->arch.pv.dr_mask[msr.index] = msr.value;
                     continue;
                 }
                 break;
@@ -1564,7 +1564,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
     {
         memcpy(&c.nat->user_regs, &v->arch.user_regs, sizeof(c.nat->user_regs));
         if ( is_pv_domain(d) )
-            memcpy(c.nat->trap_ctxt, v->arch.pv_vcpu.trap_ctxt,
+            memcpy(c.nat->trap_ctxt, v->arch.pv.trap_ctxt,
                    sizeof(c.nat->trap_ctxt));
     }
     else
@@ -1574,7 +1574,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
         {
             for ( i = 0; i < ARRAY_SIZE(c.cmp->trap_ctxt); ++i )
                 XLAT_trap_info(c.cmp->trap_ctxt + i,
-                               v->arch.pv_vcpu.trap_ctxt + i);
+                               v->arch.pv.trap_ctxt + i);
         }
     }
 
@@ -1615,37 +1615,37 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
     }
     else
     {
-        c(ldt_base = v->arch.pv_vcpu.ldt_base);
-        c(ldt_ents = v->arch.pv_vcpu.ldt_ents);
-        for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames); ++i )
-            c(gdt_frames[i] = v->arch.pv_vcpu.gdt_frames[i]);
+        c(ldt_base = v->arch.pv.ldt_base);
+        c(ldt_ents = v->arch.pv.ldt_ents);
+        for ( i = 0; i < ARRAY_SIZE(v->arch.pv.gdt_frames); ++i )
+            c(gdt_frames[i] = v->arch.pv.gdt_frames[i]);
         BUILD_BUG_ON(ARRAY_SIZE(c.nat->gdt_frames) !=
                      ARRAY_SIZE(c.cmp->gdt_frames));
         for ( ; i < ARRAY_SIZE(c.nat->gdt_frames); ++i )
             c(gdt_frames[i] = 0);
-        c(gdt_ents = v->arch.pv_vcpu.gdt_ents);
-        c(kernel_ss = v->arch.pv_vcpu.kernel_ss);
-        c(kernel_sp = v->arch.pv_vcpu.kernel_sp);
-        for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.ctrlreg); ++i )
-            c(ctrlreg[i] = v->arch.pv_vcpu.ctrlreg[i]);
-        c(event_callback_eip = v->arch.pv_vcpu.event_callback_eip);
-        c(failsafe_callback_eip = v->arch.pv_vcpu.failsafe_callback_eip);
+        c(gdt_ents = v->arch.pv.gdt_ents);
+        c(kernel_ss = v->arch.pv.kernel_ss);
+        c(kernel_sp = v->arch.pv.kernel_sp);
+        for ( i = 0; i < ARRAY_SIZE(v->arch.pv.ctrlreg); ++i )
+            c(ctrlreg[i] = v->arch.pv.ctrlreg[i]);
+        c(event_callback_eip = v->arch.pv.event_callback_eip);
+        c(failsafe_callback_eip = v->arch.pv.failsafe_callback_eip);
         if ( !compat )
         {
-            c.nat->syscall_callback_eip = v->arch.pv_vcpu.syscall_callback_eip;
-            c.nat->fs_base = v->arch.pv_vcpu.fs_base;
-            c.nat->gs_base_kernel = v->arch.pv_vcpu.gs_base_kernel;
-            c.nat->gs_base_user = v->arch.pv_vcpu.gs_base_user;
+            c.nat->syscall_callback_eip = v->arch.pv.syscall_callback_eip;
+            c.nat->fs_base = v->arch.pv.fs_base;
+            c.nat->gs_base_kernel = v->arch.pv.gs_base_kernel;
+            c.nat->gs_base_user = v->arch.pv.gs_base_user;
         }
         else
         {
-            c(event_callback_cs = v->arch.pv_vcpu.event_callback_cs);
-            c(failsafe_callback_cs = v->arch.pv_vcpu.failsafe_callback_cs);
+            c(event_callback_cs = v->arch.pv.event_callback_cs);
+            c(failsafe_callback_cs = v->arch.pv.failsafe_callback_cs);
         }
 
         /* IOPL privileges are virtualised: merge back into returned eflags. */
         BUG_ON((c(user_regs.eflags) & X86_EFLAGS_IOPL) != 0);
-        c(user_regs.eflags |= v->arch.pv_vcpu.iopl);
+        c(user_regs.eflags |= v->arch.pv.iopl);
 
         if ( !compat )
         {
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 00cf6bd370..88178485cb 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -233,7 +233,7 @@ void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts)
         v->fpu_dirtied = 1;
 
         /* Xen doesn't need TS set, but the guest might. */
-        need_stts = is_pv_vcpu(v) && (v->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS);
+        need_stts = is_pv_vcpu(v) && (v->arch.pv.ctrlreg[0] & X86_CR0_TS);
     }
     else
     {
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index cb0fb570c5..7da9a0429b 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -510,7 +510,7 @@ unsigned long pv_guest_cr4_to_real_cr4(const struct vcpu *v)
     const struct domain *d = v->domain;
     unsigned long cr4;
 
-    cr4 = v->arch.pv_vcpu.ctrlreg[4] & ~X86_CR4_DE;
+    cr4 = v->arch.pv.ctrlreg[4] & ~X86_CR4_DE;
     cr4 |= mmu_cr4_features & (X86_CR4_PSE | X86_CR4_SMEP | X86_CR4_SMAP |
                                X86_CR4_OSXSAVE | X86_CR4_FSGSBASE);
 
@@ -3471,14 +3471,14 @@ long do_mmuext_op(
                          "Bad args to SET_LDT: ptr=%lx, ents=%x\n", ptr, ents);
                 rc = -EINVAL;
             }
-            else if ( (curr->arch.pv_vcpu.ldt_ents != ents) ||
-                      (curr->arch.pv_vcpu.ldt_base != ptr) )
+            else if ( (curr->arch.pv.ldt_ents != ents) ||
+                      (curr->arch.pv.ldt_base != ptr) )
             {
                 if ( pv_destroy_ldt(curr) )
                     flush_tlb_local();
 
-                curr->arch.pv_vcpu.ldt_base = ptr;
-                curr->arch.pv_vcpu.ldt_ents = ents;
+                curr->arch.pv.ldt_base = ptr;
+                curr->arch.pv.ldt_ents = ents;
                 load_LDT(curr);
             }
             break;
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index b87ec9034c..4524823443 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -412,7 +412,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( set_iopl.iopl > 3 )
             break;
         ret = 0;
-        curr->arch.pv_vcpu.iopl = MASK_INSR(set_iopl.iopl, X86_EFLAGS_IOPL);
+        curr->arch.pv.iopl = MASK_INSR(set_iopl.iopl, X86_EFLAGS_IOPL);
         break;
     }
 
@@ -429,12 +429,11 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             break;
         ret = 0;
 #ifndef COMPAT
-        curr->arch.pv_vcpu.iobmp = set_iobitmap.bitmap;
+        curr->arch.pv.iobmp = set_iobitmap.bitmap;
 #else
-        guest_from_compat_handle(curr->arch.pv_vcpu.iobmp,
-                                 set_iobitmap.bitmap);
+        guest_from_compat_handle(curr->arch.pv.iobmp, set_iobitmap.bitmap);
 #endif
-        curr->arch.pv_vcpu.iobmp_limit = set_iobitmap.nr_ports;
+        curr->arch.pv.iobmp_limit = set_iobitmap.nr_ports;
         break;
     }
 
diff --git a/xen/arch/x86/pv/callback.c b/xen/arch/x86/pv/callback.c
index 394726a197..acfd1c70d8 100644
--- a/xen/arch/x86/pv/callback.c
+++ b/xen/arch/x86/pv/callback.c
@@ -35,7 +35,7 @@ static int register_guest_nmi_callback(unsigned long address)
 {
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
-    struct trap_info *t = &curr->arch.pv_vcpu.trap_ctxt[TRAP_nmi];
+    struct trap_info *t = &curr->arch.pv.trap_ctxt[TRAP_nmi];
 
     if ( !is_canonical_address(address) )
         return -EINVAL;
@@ -60,7 +60,7 @@ static int register_guest_nmi_callback(unsigned long address)
 static void unregister_guest_nmi_callback(void)
 {
     struct vcpu *curr = current;
-    struct trap_info *t = &curr->arch.pv_vcpu.trap_ctxt[TRAP_nmi];
+    struct trap_info *t = &curr->arch.pv.trap_ctxt[TRAP_nmi];
 
     memset(t, 0, sizeof(*t));
 }
@@ -76,11 +76,11 @@ static long register_guest_callback(struct callback_register *reg)
     switch ( reg->type )
     {
     case CALLBACKTYPE_event:
-        curr->arch.pv_vcpu.event_callback_eip    = reg->address;
+        curr->arch.pv.event_callback_eip = reg->address;
         break;
 
     case CALLBACKTYPE_failsafe:
-        curr->arch.pv_vcpu.failsafe_callback_eip = reg->address;
+        curr->arch.pv.failsafe_callback_eip = reg->address;
         if ( reg->flags & CALLBACKF_mask_events )
             curr->arch.vgc_flags |= VGCF_failsafe_disables_events;
         else
@@ -88,7 +88,7 @@ static long register_guest_callback(struct callback_register *reg)
         break;
 
     case CALLBACKTYPE_syscall:
-        curr->arch.pv_vcpu.syscall_callback_eip  = reg->address;
+        curr->arch.pv.syscall_callback_eip = reg->address;
         if ( reg->flags & CALLBACKF_mask_events )
             curr->arch.vgc_flags |= VGCF_syscall_disables_events;
         else
@@ -96,14 +96,14 @@ static long register_guest_callback(struct callback_register *reg)
         break;
 
     case CALLBACKTYPE_syscall32:
-        curr->arch.pv_vcpu.syscall32_callback_eip = reg->address;
-        curr->arch.pv_vcpu.syscall32_disables_events =
+        curr->arch.pv.syscall32_callback_eip = reg->address;
+        curr->arch.pv.syscall32_disables_events =
             !!(reg->flags & CALLBACKF_mask_events);
         break;
 
     case CALLBACKTYPE_sysenter:
-        curr->arch.pv_vcpu.sysenter_callback_eip = reg->address;
-        curr->arch.pv_vcpu.sysenter_disables_events =
+        curr->arch.pv.sysenter_callback_eip = reg->address;
+        curr->arch.pv.sysenter_disables_events =
             !!(reg->flags & CALLBACKF_mask_events);
         break;
 
@@ -218,13 +218,13 @@ static long compat_register_guest_callback(struct compat_callback_register *reg)
     switch ( reg->type )
     {
     case CALLBACKTYPE_event:
-        curr->arch.pv_vcpu.event_callback_cs     = reg->address.cs;
-        curr->arch.pv_vcpu.event_callback_eip    = reg->address.eip;
+        curr->arch.pv.event_callback_cs = reg->address.cs;
+        curr->arch.pv.event_callback_eip = reg->address.eip;
         break;
 
     case CALLBACKTYPE_failsafe:
-        curr->arch.pv_vcpu.failsafe_callback_cs  = reg->address.cs;
-        curr->arch.pv_vcpu.failsafe_callback_eip = reg->address.eip;
+        curr->arch.pv.failsafe_callback_cs = reg->address.cs;
+        curr->arch.pv.failsafe_callback_eip = reg->address.eip;
         if ( reg->flags & CALLBACKF_mask_events )
             curr->arch.vgc_flags |= VGCF_failsafe_disables_events;
         else
@@ -232,16 +232,16 @@ static long compat_register_guest_callback(struct compat_callback_register *reg)
         break;
 
     case CALLBACKTYPE_syscall32:
-        curr->arch.pv_vcpu.syscall32_callback_cs     = reg->address.cs;
-        curr->arch.pv_vcpu.syscall32_callback_eip    = reg->address.eip;
-        curr->arch.pv_vcpu.syscall32_disables_events =
+        curr->arch.pv.syscall32_callback_cs = reg->address.cs;
+        curr->arch.pv.syscall32_callback_eip = reg->address.eip;
+        curr->arch.pv.syscall32_disables_events =
             (reg->flags & CALLBACKF_mask_events) != 0;
         break;
 
     case CALLBACKTYPE_sysenter:
-        curr->arch.pv_vcpu.sysenter_callback_cs     = reg->address.cs;
-        curr->arch.pv_vcpu.sysenter_callback_eip    = reg->address.eip;
-        curr->arch.pv_vcpu.sysenter_disables_events =
+        curr->arch.pv.sysenter_callback_cs = reg->address.cs;
+        curr->arch.pv.sysenter_callback_eip = reg->address.eip;
+        curr->arch.pv.sysenter_disables_events =
             (reg->flags & CALLBACKF_mask_events) != 0;
         break;
 
@@ -352,7 +352,7 @@ long do_set_trap_table(XEN_GUEST_HANDLE_PARAM(const_trap_info_t) traps)
 {
     struct trap_info cur;
     struct vcpu *curr = current;
-    struct trap_info *dst = curr->arch.pv_vcpu.trap_ctxt;
+    struct trap_info *dst = curr->arch.pv.trap_ctxt;
     long rc = 0;
 
     /* If no table is presented then clear the entire virtual IDT. */
@@ -397,7 +397,7 @@ int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps)
 {
     struct vcpu *curr = current;
     struct compat_trap_info cur;
-    struct trap_info *dst = curr->arch.pv_vcpu.trap_ctxt;
+    struct trap_info *dst = curr->arch.pv.trap_ctxt;
     long rc = 0;
 
     /* If no table is presented then clear the entire virtual IDT. */
diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c
index 71bf92713e..9b84cbe42f 100644
--- a/xen/arch/x86/pv/descriptor-tables.c
+++ b/xen/arch/x86/pv/descriptor-tables.c
@@ -37,9 +37,9 @@ bool pv_destroy_ldt(struct vcpu *v)
 
     ASSERT(!in_irq());
 
-    spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
+    spin_lock(&v->arch.pv.shadow_ldt_lock);
 
-    if ( v->arch.pv_vcpu.shadow_ldt_mapcnt == 0 )
+    if ( v->arch.pv.shadow_ldt_mapcnt == 0 )
         goto out;
 
     pl1e = pv_ldt_ptes(v);
@@ -58,11 +58,11 @@ bool pv_destroy_ldt(struct vcpu *v)
         put_page_and_type(page);
     }
 
-    ASSERT(v->arch.pv_vcpu.shadow_ldt_mapcnt == mappings_dropped);
-    v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
+    ASSERT(v->arch.pv.shadow_ldt_mapcnt == mappings_dropped);
+    v->arch.pv.shadow_ldt_mapcnt = 0;
 
  out:
-    spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
+    spin_unlock(&v->arch.pv.shadow_ldt_lock);
 
     return mappings_dropped;
 }
@@ -74,7 +74,7 @@ void pv_destroy_gdt(struct vcpu *v)
     l1_pgentry_t zero_l1e = l1e_from_mfn(zero_mfn, __PAGE_HYPERVISOR_RO);
     unsigned int i;
 
-    v->arch.pv_vcpu.gdt_ents = 0;
+    v->arch.pv.gdt_ents = 0;
     for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
     {
         mfn_t mfn = l1e_get_mfn(pl1e[i]);
@@ -84,7 +84,7 @@ void pv_destroy_gdt(struct vcpu *v)
             put_page_and_type(mfn_to_page(mfn));
 
         l1e_write(&pl1e[i], zero_l1e);
-        v->arch.pv_vcpu.gdt_frames[i] = 0;
+        v->arch.pv.gdt_frames[i] = 0;
     }
 }
 
@@ -117,11 +117,11 @@ long pv_set_gdt(struct vcpu *v, unsigned long *frames, unsigned int entries)
     pv_destroy_gdt(v);
 
     /* Install the new GDT. */
-    v->arch.pv_vcpu.gdt_ents = entries;
+    v->arch.pv.gdt_ents = entries;
     pl1e = pv_gdt_ptes(v);
     for ( i = 0; i < nr_frames; i++ )
     {
-        v->arch.pv_vcpu.gdt_frames[i] = frames[i];
+        v->arch.pv.gdt_frames[i] = frames[i];
         l1e_write(&pl1e[i], l1e_from_pfn(frames[i], __PAGE_HYPERVISOR_RW));
     }
 
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 078288bd4f..96ff0eee5b 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -589,8 +589,8 @@ int __init dom0_construct_pv(struct domain *d,
 
     if ( is_pv_32bit_domain(d) )
     {
-        v->arch.pv_vcpu.failsafe_callback_cs = FLAT_COMPAT_KERNEL_CS;
-        v->arch.pv_vcpu.event_callback_cs    = FLAT_COMPAT_KERNEL_CS;
+        v->arch.pv.failsafe_callback_cs = FLAT_COMPAT_KERNEL_CS;
+        v->arch.pv.event_callback_cs    = FLAT_COMPAT_KERNEL_CS;
     }
 
     /* WARNING: The new domain must have its 'processor' field filled in! */
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 022831a7b0..ce50dacb5f 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -161,8 +161,7 @@ void pv_vcpu_destroy(struct vcpu *v)
     }
 
     pv_destroy_gdt_ldt_l1tab(v);
-    xfree(v->arch.pv_vcpu.trap_ctxt);
-    v->arch.pv_vcpu.trap_ctxt = NULL;
+    XFREE(v->arch.pv.trap_ctxt);
 }
 
 int pv_vcpu_initialise(struct vcpu *v)
@@ -172,17 +171,16 @@ int pv_vcpu_initialise(struct vcpu *v)
 
     ASSERT(!is_idle_domain(d));
 
-    spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);
+    spin_lock_init(&v->arch.pv.shadow_ldt_lock);
 
     rc = pv_create_gdt_ldt_l1tab(v);
     if ( rc )
         return rc;
 
-    BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) >
+    BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv.trap_ctxt) >
                  PAGE_SIZE);
-    v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,
-                                              NR_VECTORS);
-    if ( !v->arch.pv_vcpu.trap_ctxt )
+    v->arch.pv.trap_ctxt = xzalloc_array(struct trap_info, NR_VECTORS);
+    if ( !v->arch.pv.trap_ctxt )
     {
         rc = -ENOMEM;
         goto done;
@@ -191,7 +189,7 @@ int pv_vcpu_initialise(struct vcpu *v)
     /* PV guests by default have a 100Hz ticker. */
     v->periodic_period = MILLISECS(10);
 
-    v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
+    v->arch.pv.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
 
     if ( is_pv_32bit_domain(d) )
     {
@@ -314,14 +312,12 @@ static void _toggle_guest_pt(struct vcpu *v)
     if ( !(v->arch.flags & TF_kernel_mode) )
         return;
 
-    if ( v->arch.pv_vcpu.need_update_runstate_area &&
-         update_runstate_area(v) )
-        v->arch.pv_vcpu.need_update_runstate_area = 0;
+    if ( v->arch.pv.need_update_runstate_area && update_runstate_area(v) )
+        v->arch.pv.need_update_runstate_area = 0;
 
-    if ( v->arch.pv_vcpu.pending_system_time.version &&
-         update_secondary_system_time(v,
-                                      &v->arch.pv_vcpu.pending_system_time) )
-        v->arch.pv_vcpu.pending_system_time.version = 0;
+    if ( v->arch.pv.pending_system_time.version &&
+         update_secondary_system_time(v, &v->arch.pv.pending_system_time) )
+        v->arch.pv.pending_system_time.version = 0;
 }
 
 void toggle_guest_mode(struct vcpu *v)
@@ -331,9 +327,9 @@ void toggle_guest_mode(struct vcpu *v)
     if ( cpu_has_fsgsbase )
     {
         if ( v->arch.flags & TF_kernel_mode )
-            v->arch.pv_vcpu.gs_base_kernel = __rdgsbase();
+            v->arch.pv.gs_base_kernel = __rdgsbase();
         else
-            v->arch.pv_vcpu.gs_base_user = __rdgsbase();
+            v->arch.pv.gs_base_user = __rdgsbase();
     }
     asm volatile ( "swapgs" );
 
diff --git a/xen/arch/x86/pv/emul-gate-op.c b/xen/arch/x86/pv/emul-gate-op.c
index 810c4f7d8c..d1c8aa6f7b 100644
--- a/xen/arch/x86/pv/emul-gate-op.c
+++ b/xen/arch/x86/pv/emul-gate-op.c
@@ -324,8 +324,8 @@ void pv_emulate_gate_op(struct cpu_user_regs *regs)
                 pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
                 return;
             }
-            esp = v->arch.pv_vcpu.kernel_sp;
-            ss = v->arch.pv_vcpu.kernel_ss;
+            esp = v->arch.pv.kernel_sp;
+            ss = v->arch.pv.kernel_ss;
             if ( (ss & 3) != (sel & 3) ||
                  !pv_emul_read_descriptor(ss, v, &base, &limit, &ar, 0) ||
                  ((ar >> 13) & 3) != (sel & 3) ||
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 84f22ae988..45941ea0db 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -111,9 +111,9 @@ static bool iopl_ok(const struct vcpu *v, const struct cpu_user_regs *regs)
     unsigned int cpl = guest_kernel_mode(v, regs) ?
         (VM_ASSIST(v->domain, architectural_iopl) ? 0 : 1) : 3;
 
-    ASSERT((v->arch.pv_vcpu.iopl & ~X86_EFLAGS_IOPL) == 0);
+    ASSERT((v->arch.pv.iopl & ~X86_EFLAGS_IOPL) == 0);
 
-    return IOPL(cpl) <= v->arch.pv_vcpu.iopl;
+    return IOPL(cpl) <= v->arch.pv.iopl;
 }
 
 /* Has the guest requested sufficient permission for this I/O access? */
@@ -126,7 +126,7 @@ static bool guest_io_okay(unsigned int port, unsigned int bytes,
     if ( iopl_ok(v, regs) )
         return true;
 
-    if ( (port + bytes) <= v->arch.pv_vcpu.iobmp_limit )
+    if ( (port + bytes) <= v->arch.pv.iobmp_limit )
     {
         union { uint8_t bytes[2]; uint16_t mask; } x;
 
@@ -137,7 +137,7 @@ static bool guest_io_okay(unsigned int port, unsigned int bytes,
         if ( user_mode )
             toggle_guest_pt(v);
 
-        switch ( __copy_from_guest_offset(x.bytes, v->arch.pv_vcpu.iobmp,
+        switch ( __copy_from_guest_offset(x.bytes, v->arch.pv.iobmp,
                                           port>>3, 2) )
         {
         default: x.bytes[0] = ~0;
@@ -286,8 +286,7 @@ static unsigned int check_guest_io_breakpoint(struct vcpu *v,
     unsigned int width, i, match = 0;
     unsigned long start;
 
-    if ( !(v->arch.debugreg[5]) ||
-         !(v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE) )
+    if ( !(v->arch.debugreg[5]) || !(v->arch.pv.ctrlreg[4] & X86_CR4_DE) )
         return 0;
 
     for ( i = 0; i < 4; i++ )
@@ -701,12 +700,12 @@ static int read_cr(unsigned int reg, unsigned long *val,
     switch ( reg )
     {
     case 0: /* Read CR0 */
-        *val = (read_cr0() & ~X86_CR0_TS) | curr->arch.pv_vcpu.ctrlreg[0];
+        *val = (read_cr0() & ~X86_CR0_TS) | curr->arch.pv.ctrlreg[0];
         return X86EMUL_OKAY;
 
     case 2: /* Read CR2 */
     case 4: /* Read CR4 */
-        *val = curr->arch.pv_vcpu.ctrlreg[reg];
+        *val = curr->arch.pv.ctrlreg[reg];
         return X86EMUL_OKAY;
 
     case 3: /* Read CR3 */
@@ -755,7 +754,7 @@ static int write_cr(unsigned int reg, unsigned long val,
         return X86EMUL_OKAY;
 
     case 2: /* Write CR2 */
-        curr->arch.pv_vcpu.ctrlreg[2] = val;
+        curr->arch.pv.ctrlreg[2] = val;
         arch_set_cr2(curr, val);
         return X86EMUL_OKAY;
 
@@ -785,7 +784,7 @@ static int write_cr(unsigned int reg, unsigned long val,
     }
 
     case 4: /* Write CR4 */
-        curr->arch.pv_vcpu.ctrlreg[4] = pv_guest_cr4_fixup(curr, val);
+        curr->arch.pv.ctrlreg[4] = pv_guest_cr4_fixup(curr, val);
         write_cr4(pv_guest_cr4_to_real_cr4(curr));
         ctxt_switch_levelling(curr);
         return X86EMUL_OKAY;
@@ -834,20 +833,20 @@ static int read_msr(unsigned int reg, uint64_t *val,
     case MSR_FS_BASE:
         if ( is_pv_32bit_domain(currd) )
             break;
-        *val = cpu_has_fsgsbase ? __rdfsbase() : curr->arch.pv_vcpu.fs_base;
+        *val = cpu_has_fsgsbase ? __rdfsbase() : curr->arch.pv.fs_base;
         return X86EMUL_OKAY;
 
     case MSR_GS_BASE:
         if ( is_pv_32bit_domain(currd) )
             break;
         *val = cpu_has_fsgsbase ? __rdgsbase()
-                                : curr->arch.pv_vcpu.gs_base_kernel;
+                                : curr->arch.pv.gs_base_kernel;
         return X86EMUL_OKAY;
 
     case MSR_SHADOW_GS_BASE:
         if ( is_pv_32bit_domain(currd) )
             break;
-        *val = curr->arch.pv_vcpu.gs_base_user;
+        *val = curr->arch.pv.gs_base_user;
         return X86EMUL_OKAY;
 
     /*
@@ -918,13 +917,13 @@ static int read_msr(unsigned int reg, uint64_t *val,
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
             break;
-        *val = curr->arch.pv_vcpu.dr_mask[0];
+        *val = curr->arch.pv.dr_mask[0];
         return X86EMUL_OKAY;
 
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
             break;
-        *val = curr->arch.pv_vcpu.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+        *val = curr->arch.pv.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1];
         return X86EMUL_OKAY;
 
     case MSR_IA32_PERF_CAPABILITIES:
@@ -996,21 +995,21 @@ static int write_msr(unsigned int reg, uint64_t val,
         if ( is_pv_32bit_domain(currd) || !is_canonical_address(val) )
             break;
         wrfsbase(val);
-        curr->arch.pv_vcpu.fs_base = val;
+        curr->arch.pv.fs_base = val;
         return X86EMUL_OKAY;
 
     case MSR_GS_BASE:
         if ( is_pv_32bit_domain(currd) || !is_canonical_address(val) )
             break;
         wrgsbase(val);
-        curr->arch.pv_vcpu.gs_base_kernel = val;
+        curr->arch.pv.gs_base_kernel = val;
         return X86EMUL_OKAY;
 
     case MSR_SHADOW_GS_BASE:
         if ( is_pv_32bit_domain(currd) || !is_canonical_address(val) )
             break;
         wrgsshadow(val);
-        curr->arch.pv_vcpu.gs_base_user = val;
+        curr->arch.pv.gs_base_user = val;
         return X86EMUL_OKAY;
 
     case MSR_K7_FID_VID_STATUS:
@@ -1115,7 +1114,7 @@ static int write_msr(unsigned int reg, uint64_t val,
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
             break;
-        curr->arch.pv_vcpu.dr_mask[0] = val;
+        curr->arch.pv.dr_mask[0] = val;
         if ( curr->arch.debugreg[7] & DR7_ACTIVE_MASK )
             wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, val);
         return X86EMUL_OKAY;
@@ -1123,7 +1122,7 @@ static int write_msr(unsigned int reg, uint64_t val,
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
             break;
-        curr->arch.pv_vcpu.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
+        curr->arch.pv.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
         if ( curr->arch.debugreg[7] & DR7_ACTIVE_MASK )
             wrmsrl(reg, val);
         return X86EMUL_OKAY;
@@ -1327,7 +1326,7 @@ int pv_emulate_privileged_op(struct cpu_user_regs *regs)
     else
         regs->eflags |= X86_EFLAGS_IF;
     ASSERT(!(regs->eflags & X86_EFLAGS_IOPL));
-    regs->eflags |= curr->arch.pv_vcpu.iopl;
+    regs->eflags |= curr->arch.pv.iopl;
     eflags = regs->eflags;
 
     ctxt.ctxt.addr_size = ar & _SEGMENT_L ? 64 : ar & _SEGMENT_DB ? 32 : 16;
@@ -1369,7 +1368,7 @@ int pv_emulate_privileged_op(struct cpu_user_regs *regs)
         if ( ctxt.bpmatch )
         {
             curr->arch.debugreg[6] |= ctxt.bpmatch | DR_STATUS_RESERVED_ONE;
-            if ( !(curr->arch.pv_vcpu.trap_bounce.flags & TBF_EXCEPTION) )
+            if ( !(curr->arch.pv.trap_bounce.flags & TBF_EXCEPTION) )
                 pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
         }
         /* fall through */
diff --git a/xen/arch/x86/pv/iret.c b/xen/arch/x86/pv/iret.c
index ca433a69c4..c359a1dbfd 100644
--- a/xen/arch/x86/pv/iret.c
+++ b/xen/arch/x86/pv/iret.c
@@ -51,7 +51,7 @@ unsigned long do_iret(void)
     }
 
     if ( VM_ASSIST(v->domain, architectural_iopl) )
-        v->arch.pv_vcpu.iopl = iret_saved.rflags & X86_EFLAGS_IOPL;
+        v->arch.pv.iopl = iret_saved.rflags & X86_EFLAGS_IOPL;
 
     regs->rip    = iret_saved.rip;
     regs->cs     = iret_saved.cs | 3; /* force guest privilege */
@@ -115,7 +115,7 @@ unsigned int compat_iret(void)
     }
 
     if ( VM_ASSIST(v->domain, architectural_iopl) )
-        v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;
+        v->arch.pv.iopl = eflags & X86_EFLAGS_IOPL;
 
     regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
 
@@ -130,7 +130,7 @@ unsigned int compat_iret(void)
          * mode frames).
          */
         const struct trap_info *ti;
-        u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;
+        u32 x, ksp = v->arch.pv.kernel_sp - 40;
         unsigned int i;
         int rc = 0;
 
@@ -158,9 +158,9 @@ unsigned int compat_iret(void)
             return 0;
         }
         regs->esp = ksp;
-        regs->ss = v->arch.pv_vcpu.kernel_ss;
+        regs->ss = v->arch.pv.kernel_ss;
 
-        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
+        ti = &v->arch.pv.trap_ctxt[TRAP_gp_fault];
         if ( TI_GET_IF(ti) )
             eflags &= ~X86_EFLAGS_IF;
         regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
diff --git a/xen/arch/x86/pv/misc-hypercalls.c b/xen/arch/x86/pv/misc-hypercalls.c
index 1619be7874..9f61f3db3c 100644
--- a/xen/arch/x86/pv/misc-hypercalls.c
+++ b/xen/arch/x86/pv/misc-hypercalls.c
@@ -42,12 +42,12 @@ long do_fpu_taskswitch(int set)
 
     if ( set )
     {
-        v->arch.pv_vcpu.ctrlreg[0] |= X86_CR0_TS;
+        v->arch.pv.ctrlreg[0] |= X86_CR0_TS;
         stts();
     }
     else
     {
-        v->arch.pv_vcpu.ctrlreg[0] &= ~X86_CR0_TS;
+        v->arch.pv.ctrlreg[0] &= ~X86_CR0_TS;
         if ( v->fpu_dirtied )
             clts();
     }
diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c
index b46fd94c2c..e9156eaf4c 100644
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -87,7 +87,7 @@ bool pv_map_ldt_shadow_page(unsigned int offset)
     struct domain *currd = curr->domain;
     struct page_info *page;
     l1_pgentry_t gl1e, *pl1e;
-    unsigned long linear = curr->arch.pv_vcpu.ldt_base + offset;
+    unsigned long linear = curr->arch.pv.ldt_base + offset;
 
     BUG_ON(unlikely(in_irq()));
 
@@ -97,7 +97,7 @@ bool pv_map_ldt_shadow_page(unsigned int offset)
      * current vcpu, and vcpu_reset() will block until this vcpu has been
      * descheduled before continuing.
      */
-    ASSERT((offset >> 3) <= curr->arch.pv_vcpu.ldt_ents);
+    ASSERT((offset >> 3) <= curr->arch.pv.ldt_ents);
 
     if ( is_pv_32bit_domain(currd) )
         linear = (uint32_t)linear;
@@ -119,10 +119,10 @@ bool pv_map_ldt_shadow_page(unsigned int offset)
     pl1e = &pv_ldt_ptes(curr)[offset >> PAGE_SHIFT];
     l1e_add_flags(gl1e, _PAGE_RW);
 
-    spin_lock(&curr->arch.pv_vcpu.shadow_ldt_lock);
+    spin_lock(&curr->arch.pv.shadow_ldt_lock);
     l1e_write(pl1e, gl1e);
-    curr->arch.pv_vcpu.shadow_ldt_mapcnt++;
-    spin_unlock(&curr->arch.pv_vcpu.shadow_ldt_lock);
+    curr->arch.pv.shadow_ldt_mapcnt++;
+    spin_unlock(&curr->arch.pv.shadow_ldt_lock);
 
     return true;
 }
diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
index f48db92243..1740784ff2 100644
--- a/xen/arch/x86/pv/traps.c
+++ b/xen/arch/x86/pv/traps.c
@@ -63,8 +63,8 @@ void pv_inject_event(const struct x86_event *event)
     else
         ASSERT(error_code == X86_EVENT_NO_EC);
 
-    tb = &curr->arch.pv_vcpu.trap_bounce;
-    ti = &curr->arch.pv_vcpu.trap_ctxt[vector];
+    tb = &curr->arch.pv.trap_bounce;
+    ti = &curr->arch.pv.trap_ctxt[vector];
 
     tb->flags = TBF_EXCEPTION;
     tb->cs    = ti->cs;
@@ -73,7 +73,7 @@ void pv_inject_event(const struct x86_event *event)
     if ( event->type == X86_EVENTTYPE_HW_EXCEPTION &&
          vector == TRAP_page_fault )
     {
-        curr->arch.pv_vcpu.ctrlreg[2] = event->cr2;
+        curr->arch.pv.ctrlreg[2] = event->cr2;
         arch_set_cr2(curr, event->cr2);
 
         /* Re-set error_code.user flag appropriately for the guest. */
@@ -113,7 +113,7 @@ void pv_inject_event(const struct x86_event *event)
 bool set_guest_machinecheck_trapbounce(void)
 {
     struct vcpu *curr = current;
-    struct trap_bounce *tb = &curr->arch.pv_vcpu.trap_bounce;
+    struct trap_bounce *tb = &curr->arch.pv.trap_bounce;
 
     pv_inject_hw_exception(TRAP_machine_check, X86_EVENT_NO_EC);
     tb->flags &= ~TBF_EXCEPTION; /* not needed for MCE delivery path */
@@ -128,7 +128,7 @@ bool set_guest_machinecheck_trapbounce(void)
 bool set_guest_nmi_trapbounce(void)
 {
     struct vcpu *curr = current;
-    struct trap_bounce *tb = &curr->arch.pv_vcpu.trap_bounce;
+    struct trap_bounce *tb = &curr->arch.pv.trap_bounce;
 
     pv_inject_hw_exception(TRAP_nmi, X86_EVENT_NO_EC);
     tb->flags &= ~TBF_EXCEPTION; /* not needed for NMI delivery path */
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 536449b264..69e9aaf632 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1099,7 +1099,7 @@ static void __update_vcpu_system_time(struct vcpu *v, int force)
 
     if ( !update_secondary_system_time(v, &_u) && is_pv_domain(d) &&
          !is_pv_32bit_domain(d) && !(v->arch.flags & TF_kernel_mode) )
-        v->arch.pv_vcpu.pending_system_time = _u;
+        v->arch.pv.pending_system_time = _u;
 }
 
 bool update_secondary_system_time(struct vcpu *v,
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index ddff346a2f..d8325a30b5 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1144,8 +1144,7 @@ static int handle_ldt_mapping_fault(unsigned int offset,
             return 0;
 
         /* Access would have become non-canonical? Pass #GP[sel] back. */
-        if ( unlikely(!is_canonical_address(
-                          curr->arch.pv_vcpu.ldt_base + offset)) )
+        if ( unlikely(!is_canonical_address(curr->arch.pv.ldt_base + offset)) )
         {
             uint16_t ec = (offset & ~(X86_XEC_EXT | X86_XEC_IDT)) | X86_XEC_TI;
 
@@ -1154,7 +1153,7 @@ static int handle_ldt_mapping_fault(unsigned int offset,
         else
             /* else pass the #PF back, with adjusted %cr2. */
             pv_inject_page_fault(regs->error_code,
-                                 curr->arch.pv_vcpu.ldt_base + offset);
+                                 curr->arch.pv.ldt_base + offset);
     }
 
     return EXCRET_fault_fixed;
@@ -1536,7 +1535,7 @@ void do_general_protection(struct cpu_user_regs *regs)
         /* This fault must be due to <INT n> instruction. */
         const struct trap_info *ti;
         unsigned char vector = regs->error_code >> 3;
-        ti = &v->arch.pv_vcpu.trap_ctxt[vector];
+        ti = &v->arch.pv.trap_ctxt[vector];
         if ( permit_softint(TI_GET_DPL(ti), v, regs) )
         {
             regs->rip += 2;
@@ -1768,10 +1767,10 @@ void do_device_not_available(struct cpu_user_regs *regs)
 
     vcpu_restore_fpu_lazy(curr);
 
-    if ( curr->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS )
+    if ( curr->arch.pv.ctrlreg[0] & X86_CR0_TS )
     {
         pv_inject_hw_exception(TRAP_no_device, X86_EVENT_NO_EC);
-        curr->arch.pv_vcpu.ctrlreg[0] &= ~X86_CR0_TS;
+        curr->arch.pv.ctrlreg[0] &= ~X86_CR0_TS;
     }
     else
         TRACE_0D(TRC_PV_MATH_STATE_RESTORE);
@@ -2073,10 +2072,10 @@ void activate_debugregs(const struct vcpu *curr)
 
     if ( boot_cpu_has(X86_FEATURE_DBEXT) )
     {
-        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[0]);
-        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[1]);
-        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[2]);
-        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[3]);
+        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.pv.dr_mask[0]);
+        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, curr->arch.pv.dr_mask[1]);
+        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, curr->arch.pv.dr_mask[2]);
+        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, curr->arch.pv.dr_mask[3]);
     }
 }
 
@@ -2109,7 +2108,7 @@ long set_debugreg(struct vcpu *v, unsigned int reg, unsigned long value)
         break;
 
     case 4:
-        if ( v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE )
+        if ( v->arch.pv.ctrlreg[4] & X86_CR4_DE )
             return -ENODEV;
 
         /* Fallthrough */
@@ -2129,7 +2128,7 @@ long set_debugreg(struct vcpu *v, unsigned int reg, unsigned long value)
         break;
 
     case 5:
-        if ( v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE )
+        if ( v->arch.pv.ctrlreg[4] & X86_CR4_DE )
             return -ENODEV;
 
         /* Fallthrough */
@@ -2160,7 +2159,7 @@ long set_debugreg(struct vcpu *v, unsigned int reg, unsigned long value)
             {
                 if ( ((value >> i) & 3) == DR_IO )
                 {
-                    if ( !(v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE) )
+                    if ( !(v->arch.pv.ctrlreg[4] & X86_CR4_DE) )
                         return -EPERM;
                     io_enable |= value & (3 << ((i - 16) >> 1));
                 }
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 18077af7df..22a7a85f19 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -52,28 +52,23 @@ void __dummy__(void)
     OFFSET(VCPU_processor, struct vcpu, processor);
     OFFSET(VCPU_domain, struct vcpu, domain);
     OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
-    OFFSET(VCPU_trap_bounce, struct vcpu, arch.pv_vcpu.trap_bounce);
+    OFFSET(VCPU_trap_bounce, struct vcpu, arch.pv.trap_bounce);
     OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
-    OFFSET(VCPU_event_addr, struct vcpu, arch.pv_vcpu.event_callback_eip);
-    OFFSET(VCPU_event_sel, struct vcpu, arch.pv_vcpu.event_callback_cs);
-    OFFSET(VCPU_syscall_addr, struct vcpu,
-           arch.pv_vcpu.syscall_callback_eip);
-    OFFSET(VCPU_syscall32_addr, struct vcpu,
-           arch.pv_vcpu.syscall32_callback_eip);
-    OFFSET(VCPU_syscall32_sel, struct vcpu,
-           arch.pv_vcpu.syscall32_callback_cs);
-    OFFSET(VCPU_syscall32_disables_events, struct vcpu,
-           arch.pv_vcpu.syscall32_disables_events);
-    OFFSET(VCPU_sysenter_addr, struct vcpu,
-           arch.pv_vcpu.sysenter_callback_eip);
-    OFFSET(VCPU_sysenter_sel, struct vcpu,
-           arch.pv_vcpu.sysenter_callback_cs);
-    OFFSET(VCPU_sysenter_disables_events, struct vcpu,
-           arch.pv_vcpu.sysenter_disables_events);
-    OFFSET(VCPU_trap_ctxt, struct vcpu, arch.pv_vcpu.trap_ctxt);
-    OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv_vcpu.kernel_sp);
-    OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv_vcpu.kernel_ss);
-    OFFSET(VCPU_iopl, struct vcpu, arch.pv_vcpu.iopl);
+    OFFSET(VCPU_event_addr, struct vcpu, arch.pv.event_callback_eip);
+    OFFSET(VCPU_event_sel, struct vcpu, arch.pv.event_callback_cs);
+    OFFSET(VCPU_syscall_addr, struct vcpu, arch.pv.syscall_callback_eip);
+    OFFSET(VCPU_syscall32_addr, struct vcpu, arch.pv.syscall32_callback_eip);
+    OFFSET(VCPU_syscall32_sel, struct vcpu, arch.pv.syscall32_callback_cs);
+    OFFSET(VCPU_syscall32_disables_events,
+           struct vcpu, arch.pv.syscall32_disables_events);
+    OFFSET(VCPU_sysenter_addr, struct vcpu, arch.pv.sysenter_callback_eip);
+    OFFSET(VCPU_sysenter_sel, struct vcpu, arch.pv.sysenter_callback_cs);
+    OFFSET(VCPU_sysenter_disables_events,
+           struct vcpu, arch.pv.sysenter_disables_events);
+    OFFSET(VCPU_trap_ctxt, struct vcpu, arch.pv.trap_ctxt);
+    OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv.kernel_sp);
+    OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv.kernel_ss);
+    OFFSET(VCPU_iopl, struct vcpu, arch.pv.iopl);
     OFFSET(VCPU_guest_context_flags, struct vcpu, arch.vgc_flags);
     OFFSET(VCPU_cr3, struct vcpu, arch.cr3);
     OFFSET(VCPU_arch_msrs, struct vcpu, arch.msrs);
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index dab8c4f39d..48cb96cb91 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -371,7 +371,7 @@ UNLIKELY_END(msi_check)
         mov   VCPU_domain(%rbx), %rax
 
         /*
-         * if ( null_trap_info(v, &v->arch.pv_vcpu.trap_ctxt[0x80]) )
+         * if ( null_trap_info(v, &v->arch.pv.trap_ctxt[0x80]) )
          *    goto int80_slow_path;
          */
         mov    0x80 * TRAPINFO_sizeof + TRAPINFO_eip(%rsi), %rdi
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index cca4ae926e..989a53463a 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1007,8 +1007,8 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 long do_stack_switch(unsigned long ss, unsigned long esp)
 {
     fixup_guest_stack_selector(current->domain, ss);
-    current->arch.pv_vcpu.kernel_ss = ss;
-    current->arch.pv_vcpu.kernel_sp = esp;
+    current->arch.pv.kernel_ss = ss;
+    current->arch.pv.kernel_sp = esp;
     return 0;
 }
 
@@ -1026,7 +1026,7 @@ long do_set_segment_base(unsigned int which, unsigned long base)
         if ( is_canonical_address(base) )
         {
             wrfsbase(base);
-            v->arch.pv_vcpu.fs_base = base;
+            v->arch.pv.fs_base = base;
         }
         else
             ret = -EINVAL;
@@ -1036,7 +1036,7 @@ long do_set_segment_base(unsigned int which, unsigned long base)
         if ( is_canonical_address(base) )
         {
             wrgsshadow(base);
-            v->arch.pv_vcpu.gs_base_user = base;
+            v->arch.pv.gs_base_user = base;
         }
         else
             ret = -EINVAL;
@@ -1046,7 +1046,7 @@ long do_set_segment_base(unsigned int which, unsigned long base)
         if ( is_canonical_address(base) )
         {
             wrgsbase(base);
-            v->arch.pv_vcpu.gs_base_kernel = base;
+            v->arch.pv.gs_base_kernel = base;
         }
         else
             ret = -EINVAL;
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index ed02b788d3..606b1b04da 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -169,15 +169,15 @@ void vcpu_show_registers(const struct vcpu *v)
     if ( !is_pv_vcpu(v) )
         return;
 
-    crs[0] = v->arch.pv_vcpu.ctrlreg[0];
+    crs[0] = v->arch.pv.ctrlreg[0];
     crs[2] = arch_get_cr2(v);
     crs[3] = pagetable_get_paddr(kernel ?
                                  v->arch.guest_table :
                                  v->arch.guest_table_user);
-    crs[4] = v->arch.pv_vcpu.ctrlreg[4];
-    crs[5] = v->arch.pv_vcpu.fs_base;
-    crs[6 + !kernel] = v->arch.pv_vcpu.gs_base_kernel;
-    crs[7 - !kernel] = v->arch.pv_vcpu.gs_base_user;
+    crs[4] = v->arch.pv.ctrlreg[4];
+    crs[5] = v->arch.pv.fs_base;
+    crs[6 + !kernel] = v->arch.pv.gs_base_kernel;
+    crs[7 - !kernel] = v->arch.pv.gs_base_user;
 
     _show_registers(regs, crs, CTXT_pv_guest, v);
 }
diff --git a/xen/arch/x86/x86_emulate.c b/xen/arch/x86/x86_emulate.c
index 30f89adb8d..532b7e04e1 100644
--- a/xen/arch/x86/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate.c
@@ -111,7 +111,7 @@ int x86emul_read_dr(unsigned int reg, unsigned long *val,
         break;
 
     case 4 ... 5:
-        if ( !(curr->arch.pv_vcpu.ctrlreg[4] & X86_CR4_DE) )
+        if ( !(curr->arch.pv.ctrlreg[4] & X86_CR4_DE) )
         {
             *val = curr->arch.debugreg[reg + 2];
             break;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 2f029eeeb1..8eaed36d5e 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -540,7 +540,7 @@ struct arch_vcpu
 
     /* Virtual Machine Extensions */
     union {
-        struct pv_vcpu pv_vcpu;
+        struct pv_vcpu pv;
         struct hvm_vcpu hvm_vcpu;
     };
 
diff --git a/xen/include/asm-x86/ldt.h b/xen/include/asm-x86/ldt.h
index 589daf83c6..a6236b21aa 100644
--- a/xen/include/asm-x86/ldt.h
+++ b/xen/include/asm-x86/ldt.h
@@ -9,7 +9,7 @@ static inline void load_LDT(struct vcpu *v)
     struct desc_struct *desc;
     unsigned long ents;
 
-    if ( (ents = v->arch.pv_vcpu.ldt_ents) == 0 )
+    if ( (ents = v->arch.pv.ldt_ents) == 0 )
         lldt(0);
     else
     {
diff --git a/xen/include/asm-x86/pv/traps.h b/xen/include/asm-x86/pv/traps.h
index 89985d109e..fcc75f5e9a 100644
--- a/xen/include/asm-x86/pv/traps.h
+++ b/xen/include/asm-x86/pv/traps.h
@@ -37,7 +37,7 @@ bool pv_emulate_invalid_op(struct cpu_user_regs *regs);
 static inline bool pv_trap_callback_registered(const struct vcpu *v,
                                                uint8_t vector)
 {
-    return v->arch.pv_vcpu.trap_ctxt[vector].address;
+    return v->arch.pv.trap_ctxt[vector].address;
 }
 
 #else  /* !CONFIG_PV */
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

