
[Xen-devel] [PATCH 3/7] xen/hvm: Rename d->arch.hvm_domain to d->arch.hvm



The trailing _domain suffix is redundant, but adds to code volume.  Drop it.

Reflow lines as appropriate, and switch to using the new XFREE/etc wrappers
where applicable.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
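For reference, XFREE() and friends are wrappers which free an allocation and
NULL out the owning pointer in a single statement.  A minimal sketch of the
pattern (the real definition lives in xen/include/xen/xmalloc.h and may differ
in detail):

    /* Sketch only: free an allocation and clear the pointer to it. */
    #define XFREE(p) do { \
        xfree(p);         \
        (p) = NULL;       \
    } while ( false )

so that open-coded pairs such as

    xfree(d->arch.hvm.io_handler);
    d->arch.hvm.io_handler = NULL;

collapse to

    XFREE(d->arch.hvm.io_handler);

as in hvm_domain_destroy() below.
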
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
CC: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
CC: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
CC: Brian Woods <brian.woods@xxxxxxx>
---
 xen/arch/arm/domain_build.c         |   2 +-
 xen/arch/arm/hvm.c                  |   4 +-
 xen/arch/x86/domain.c               |   2 +-
 xen/arch/x86/domctl.c               |  10 +--
 xen/arch/x86/hvm/dom0_build.c       |   4 +-
 xen/arch/x86/hvm/domain.c           |   2 +-
 xen/arch/x86/hvm/hpet.c             |   8 +-
 xen/arch/x86/hvm/hvm.c              | 145 +++++++++++++++++-------------------
 xen/arch/x86/hvm/hypercall.c        |   6 +-
 xen/arch/x86/hvm/intercept.c        |  14 ++--
 xen/arch/x86/hvm/io.c               |  48 ++++++------
 xen/arch/x86/hvm/ioreq.c            |  80 ++++++++++----------
 xen/arch/x86/hvm/irq.c              |  50 ++++++-------
 xen/arch/x86/hvm/mtrr.c             |  14 ++--
 xen/arch/x86/hvm/pmtimer.c          |  40 +++++-----
 xen/arch/x86/hvm/rtc.c              |   4 +-
 xen/arch/x86/hvm/save.c             |   6 +-
 xen/arch/x86/hvm/stdvga.c           |  18 ++---
 xen/arch/x86/hvm/svm/svm.c          |   5 +-
 xen/arch/x86/hvm/svm/vmcb.c         |   2 +-
 xen/arch/x86/hvm/vioapic.c          |  44 +++++------
 xen/arch/x86/hvm/viridian.c         |  56 +++++++-------
 xen/arch/x86/hvm/vlapic.c           |   8 +-
 xen/arch/x86/hvm/vmsi.c             |  14 ++--
 xen/arch/x86/hvm/vmx/vmcs.c         |  12 +--
 xen/arch/x86/hvm/vmx/vmx.c          |  46 ++++++------
 xen/arch/x86/hvm/vpic.c             |  20 ++---
 xen/arch/x86/hvm/vpt.c              |  20 ++---
 xen/arch/x86/irq.c                  |  10 +--
 xen/arch/x86/mm/hap/hap.c           |  11 ++-
 xen/arch/x86/mm/mem_sharing.c       |   6 +-
 xen/arch/x86/mm/shadow/common.c     |  18 ++---
 xen/arch/x86/mm/shadow/multi.c      |   8 +-
 xen/arch/x86/physdev.c              |   2 +-
 xen/arch/x86/setup.c                |  10 +--
 xen/arch/x86/time.c                 |   8 +-
 xen/common/vm_event.c               |   2 +-
 xen/drivers/passthrough/pci.c       |   2 +-
 xen/drivers/vpci/msix.c             |   6 +-
 xen/include/asm-arm/domain.h        |   2 +-
 xen/include/asm-x86/domain.h        |   4 +-
 xen/include/asm-x86/hvm/domain.h    |   2 +-
 xen/include/asm-x86/hvm/hvm.h       |  11 ++-
 xen/include/asm-x86/hvm/irq.h       |   2 +-
 xen/include/asm-x86/hvm/nestedhvm.h |   4 +-
 xen/include/asm-x86/hvm/vioapic.h   |   2 +-
 xen/include/asm-x86/hvm/vpt.h       |   4 +-
 xen/include/asm-x86/irq.h           |   3 +-
 48 files changed, 395 insertions(+), 406 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index e1c79b2..72fd2ae 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -2075,7 +2075,7 @@ static void __init evtchn_allocate(struct domain *d)
     val |= MASK_INSR(HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_LOW_LEVEL,
                      HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_MASK);
     val |= d->arch.evtchn_irq;
-    d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ] = val;
+    d->arch.hvm.params[HVM_PARAM_CALLBACK_IRQ] = val;
 }
 
 static void __init find_gnttab_region(struct domain *d,
diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
index a56b3fe..76b27c9 100644
--- a/xen/arch/arm/hvm.c
+++ b/xen/arch/arm/hvm.c
@@ -59,11 +59,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 
         if ( op == HVMOP_set_param )
         {
-            d->arch.hvm_domain.params[a.index] = a.value;
+            d->arch.hvm.params[a.index] = a.value;
         }
         else
         {
-            a.value = d->arch.hvm_domain.params[a.index];
+            a.value = d->arch.hvm.params[a.index];
             rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
         }
 
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 4cdcd5d..3dcd7f9 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -505,7 +505,7 @@ int arch_domain_create(struct domain *d,
 
     /* Need to determine if HAP is enabled before initialising paging */
     if ( is_hvm_domain(d) )
-        d->arch.hvm_domain.hap_enabled =
+        d->arch.hvm.hap_enabled =
             hvm_hap_supported() && (config->flags & XEN_DOMCTL_CDF_hap);
 
     if ( (rc = paging_domain_init(d, config->flags)) != 0 )
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index fdbcce0..f306614 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -745,7 +745,7 @@ long arch_do_domctl(
         unsigned int fmp = domctl->u.ioport_mapping.first_mport;
         unsigned int np = domctl->u.ioport_mapping.nr_ports;
         unsigned int add = domctl->u.ioport_mapping.add_mapping;
-        struct hvm_domain *hvm_domain;
+        struct hvm_domain *hvm;
         struct g2m_ioport *g2m_ioport;
         int found = 0;
 
@@ -774,14 +774,14 @@ long arch_do_domctl(
         if ( ret )
             break;
 
-        hvm_domain = &d->arch.hvm_domain;
+        hvm = &d->arch.hvm;
         if ( add )
         {
             printk(XENLOG_G_INFO
                    "ioport_map:add: dom%d gport=%x mport=%x nr=%x\n",
                    d->domain_id, fgp, fmp, np);
 
-            list_for_each_entry(g2m_ioport, &hvm_domain->g2m_ioport_list, list)
+            list_for_each_entry(g2m_ioport, &hvm->g2m_ioport_list, list)
                 if (g2m_ioport->mport == fmp )
                 {
                     g2m_ioport->gport = fgp;
@@ -800,7 +800,7 @@ long arch_do_domctl(
                 g2m_ioport->gport = fgp;
                 g2m_ioport->mport = fmp;
                 g2m_ioport->np = np;
-                list_add_tail(&g2m_ioport->list, &hvm_domain->g2m_ioport_list);
+                list_add_tail(&g2m_ioport->list, &hvm->g2m_ioport_list);
             }
             if ( !ret )
                 ret = ioports_permit_access(d, fmp, fmp + np - 1);
@@ -815,7 +815,7 @@ long arch_do_domctl(
             printk(XENLOG_G_INFO
                    "ioport_map:remove: dom%d gport=%x mport=%x nr=%x\n",
                    d->domain_id, fgp, fmp, np);
-            list_for_each_entry(g2m_ioport, &hvm_domain->g2m_ioport_list, list)
+            list_for_each_entry(g2m_ioport, &hvm->g2m_ioport_list, list)
                 if ( g2m_ioport->mport == fmp )
                 {
                     list_del(&g2m_ioport->list);
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 5065729..22e335f 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -240,7 +240,7 @@ static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
         if ( hvm_copy_to_guest_phys(gaddr, NULL, HVM_VM86_TSS_SIZE, v) !=
              HVMTRANS_okay )
             printk("Unable to zero VM86 TSS area\n");
-        d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED] =
+        d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] =
             VM86_TSS_UPDATED | ((uint64_t)HVM_VM86_TSS_SIZE << 32) | gaddr;
         if ( pvh_add_mem_range(d, gaddr, gaddr + HVM_VM86_TSS_SIZE,
                                E820_RESERVED) )
@@ -271,7 +271,7 @@ static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
     write_32bit_pse_identmap(ident_pt);
     unmap_domain_page(ident_pt);
     put_page(mfn_to_page(mfn));
-    d->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] = gaddr;
+    d->arch.hvm.params[HVM_PARAM_IDENT_PT] = gaddr;
     if ( pvh_add_mem_range(d, gaddr, gaddr + PAGE_SIZE, E820_RESERVED) )
             printk("Unable to set identity page tables as reserved in the 
memory map\n");
 
diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index ae70aaf..8a2c83e 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -319,7 +319,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
     v->arch.hvm_vcpu.cache_tsc_offset =
         d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
     hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
-                       d->arch.hvm_domain.sync_tsc);
+                       d->arch.hvm.sync_tsc);
 
     paging_update_paging_modes(v);
 
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index a594254..8090699 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -26,7 +26,7 @@
 #include <xen/event.h>
 #include <xen/trace.h>
 
-#define domain_vhpet(x) (&(x)->arch.hvm_domain.pl_time->vhpet)
+#define domain_vhpet(x) (&(x)->arch.hvm.pl_time->vhpet)
 #define vcpu_vhpet(x)   (domain_vhpet((x)->domain))
 #define vhpet_domain(x) (container_of(x, struct pl_time, vhpet)->domain)
 #define vhpet_vcpu(x)   (pt_global_vcpu_target(vhpet_domain(x)))
@@ -164,7 +164,7 @@ static int hpet_read(
     unsigned long result;
     uint64_t val;
 
-    if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] )
+    if ( !v->domain->arch.hvm.params[HVM_PARAM_HPET_ENABLED] )
     {
         result = ~0ul;
         goto out;
@@ -354,7 +354,7 @@ static int hpet_write(
 #define set_start_timer(n)   (__set_bit((n), &start_timers))
 #define set_restart_timer(n) (set_stop_timer(n),set_start_timer(n))
 
-    if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] )
+    if ( !v->domain->arch.hvm.params[HVM_PARAM_HPET_ENABLED] )
         goto out;
 
     addr &= HPET_MMAP_SIZE-1;
@@ -735,7 +735,7 @@ void hpet_init(struct domain *d)
 
     hpet_set(domain_vhpet(d));
     register_mmio_handler(d, &hpet_mmio_ops);
-    d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
+    d->arch.hvm.params[HVM_PARAM_HPET_ENABLED] = 1;
 }
 
 void hpet_deinit(struct domain *d)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 72c51fa..f895339 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -382,7 +382,7 @@ u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz)
 
 u64 hvm_scale_tsc(const struct domain *d, u64 tsc)
 {
-    u64 ratio = d->arch.hvm_domain.tsc_scaling_ratio;
+    u64 ratio = d->arch.hvm.tsc_scaling_ratio;
     u64 dummy;
 
     if ( ratio == hvm_default_tsc_scaling_ratio )
@@ -583,14 +583,14 @@ int hvm_domain_initialise(struct domain *d)
         return -EINVAL;
     }
 
-    spin_lock_init(&d->arch.hvm_domain.irq_lock);
-    spin_lock_init(&d->arch.hvm_domain.uc_lock);
-    spin_lock_init(&d->arch.hvm_domain.write_map.lock);
-    rwlock_init(&d->arch.hvm_domain.mmcfg_lock);
-    INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
-    INIT_LIST_HEAD(&d->arch.hvm_domain.g2m_ioport_list);
-    INIT_LIST_HEAD(&d->arch.hvm_domain.mmcfg_regions);
-    INIT_LIST_HEAD(&d->arch.hvm_domain.msix_tables);
+    spin_lock_init(&d->arch.hvm.irq_lock);
+    spin_lock_init(&d->arch.hvm.uc_lock);
+    spin_lock_init(&d->arch.hvm.write_map.lock);
+    rwlock_init(&d->arch.hvm.mmcfg_lock);
+    INIT_LIST_HEAD(&d->arch.hvm.write_map.list);
+    INIT_LIST_HEAD(&d->arch.hvm.g2m_ioport_list);
+    INIT_LIST_HEAD(&d->arch.hvm.mmcfg_regions);
+    INIT_LIST_HEAD(&d->arch.hvm.msix_tables);
 
     rc = create_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0, NULL, NULL);
     if ( rc )
@@ -603,15 +603,15 @@ int hvm_domain_initialise(struct domain *d)
         goto fail0;
 
     nr_gsis = is_hardware_domain(d) ? nr_irqs_gsi : NR_HVM_DOMU_IRQS;
-    d->arch.hvm_domain.pl_time = xzalloc(struct pl_time);
-    d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
-    d->arch.hvm_domain.io_handler = xzalloc_array(struct hvm_io_handler,
-                                                  NR_IO_HANDLERS);
-    d->arch.hvm_domain.irq = xzalloc_bytes(hvm_irq_size(nr_gsis));
+    d->arch.hvm.pl_time = xzalloc(struct pl_time);
+    d->arch.hvm.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
+    d->arch.hvm.io_handler = xzalloc_array(struct hvm_io_handler,
+                                           NR_IO_HANDLERS);
+    d->arch.hvm.irq = xzalloc_bytes(hvm_irq_size(nr_gsis));
 
     rc = -ENOMEM;
-    if ( !d->arch.hvm_domain.pl_time || !d->arch.hvm_domain.irq ||
-         !d->arch.hvm_domain.params  || !d->arch.hvm_domain.io_handler )
+    if ( !d->arch.hvm.pl_time || !d->arch.hvm.irq ||
+         !d->arch.hvm.params  || !d->arch.hvm.io_handler )
         goto fail1;
 
     /* Set the number of GSIs */
@@ -621,21 +621,21 @@ int hvm_domain_initialise(struct domain *d)
     ASSERT(hvm_domain_irq(d)->nr_gsis >= NR_ISAIRQS);
 
     /* need link to containing domain */
-    d->arch.hvm_domain.pl_time->domain = d;
+    d->arch.hvm.pl_time->domain = d;
 
     /* Set the default IO Bitmap. */
     if ( is_hardware_domain(d) )
     {
-        d->arch.hvm_domain.io_bitmap = _xmalloc(HVM_IOBITMAP_SIZE, PAGE_SIZE);
-        if ( d->arch.hvm_domain.io_bitmap == NULL )
+        d->arch.hvm.io_bitmap = _xmalloc(HVM_IOBITMAP_SIZE, PAGE_SIZE);
+        if ( d->arch.hvm.io_bitmap == NULL )
         {
             rc = -ENOMEM;
             goto fail1;
         }
-        memset(d->arch.hvm_domain.io_bitmap, ~0, HVM_IOBITMAP_SIZE);
+        memset(d->arch.hvm.io_bitmap, ~0, HVM_IOBITMAP_SIZE);
     }
     else
-        d->arch.hvm_domain.io_bitmap = hvm_io_bitmap;
+        d->arch.hvm.io_bitmap = hvm_io_bitmap;
 
     register_g2m_portio_handler(d);
     register_vpci_portio_handler(d);
@@ -644,7 +644,7 @@ int hvm_domain_initialise(struct domain *d)
 
     hvm_init_guest_time(d);
 
-    d->arch.hvm_domain.params[HVM_PARAM_TRIPLE_FAULT_REASON] = SHUTDOWN_reboot;
+    d->arch.hvm.params[HVM_PARAM_TRIPLE_FAULT_REASON] = SHUTDOWN_reboot;
 
     vpic_init(d);
 
@@ -659,7 +659,7 @@ int hvm_domain_initialise(struct domain *d)
     register_portio_handler(d, 0xe9, 1, hvm_print_line);
 
     if ( hvm_tsc_scaling_supported )
-        d->arch.hvm_domain.tsc_scaling_ratio = hvm_default_tsc_scaling_ratio;
+        d->arch.hvm.tsc_scaling_ratio = hvm_default_tsc_scaling_ratio;
 
     rc = hvm_funcs.domain_initialise(d);
     if ( rc != 0 )
@@ -673,11 +673,11 @@ int hvm_domain_initialise(struct domain *d)
     vioapic_deinit(d);
  fail1:
     if ( is_hardware_domain(d) )
-        xfree(d->arch.hvm_domain.io_bitmap);
-    xfree(d->arch.hvm_domain.io_handler);
-    xfree(d->arch.hvm_domain.params);
-    xfree(d->arch.hvm_domain.pl_time);
-    xfree(d->arch.hvm_domain.irq);
+        xfree(d->arch.hvm.io_bitmap);
+    xfree(d->arch.hvm.io_handler);
+    xfree(d->arch.hvm.params);
+    xfree(d->arch.hvm.pl_time);
+    xfree(d->arch.hvm.irq);
  fail0:
     hvm_destroy_cacheattr_region_list(d);
     destroy_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0);
@@ -710,11 +710,8 @@ void hvm_domain_destroy(struct domain *d)
     struct list_head *ioport_list, *tmp;
     struct g2m_ioport *ioport;
 
-    xfree(d->arch.hvm_domain.io_handler);
-    d->arch.hvm_domain.io_handler = NULL;
-
-    xfree(d->arch.hvm_domain.params);
-    d->arch.hvm_domain.params = NULL;
+    XFREE(d->arch.hvm.io_handler);
+    XFREE(d->arch.hvm.params);
 
     hvm_destroy_cacheattr_region_list(d);
 
@@ -723,14 +720,10 @@ void hvm_domain_destroy(struct domain *d)
     stdvga_deinit(d);
     vioapic_deinit(d);
 
-    xfree(d->arch.hvm_domain.pl_time);
-    d->arch.hvm_domain.pl_time = NULL;
-
-    xfree(d->arch.hvm_domain.irq);
-    d->arch.hvm_domain.irq = NULL;
+    XFREE(d->arch.hvm.pl_time);
+    XFREE(d->arch.hvm.irq);
 
-    list_for_each_safe ( ioport_list, tmp,
-                         &d->arch.hvm_domain.g2m_ioport_list )
+    list_for_each_safe ( ioport_list, tmp, &d->arch.hvm.g2m_ioport_list )
     {
         ioport = list_entry(ioport_list, struct g2m_ioport, list);
         list_del(&ioport->list);
@@ -798,7 +791,7 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         /* Architecture-specific vmcs/vmcb bits */
         hvm_funcs.save_cpu_ctxt(v, &ctxt);
 
-        ctxt.tsc = hvm_get_guest_tsc_fixed(v, d->arch.hvm_domain.sync_tsc);
+        ctxt.tsc = hvm_get_guest_tsc_fixed(v, d->arch.hvm.sync_tsc);
 
         ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
 
@@ -1053,7 +1046,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 
     v->arch.hvm_vcpu.msr_tsc_aux = ctxt.msr_tsc_aux;
 
-    hvm_set_guest_tsc_fixed(v, ctxt.tsc, d->arch.hvm_domain.sync_tsc);
+    hvm_set_guest_tsc_fixed(v, ctxt.tsc, d->arch.hvm.sync_tsc);
 
     seg.limit = ctxt.idtr_limit;
     seg.base = ctxt.idtr_base;
@@ -1637,7 +1630,7 @@ void hvm_triple_fault(void)
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
-    u8 reason = d->arch.hvm_domain.params[HVM_PARAM_TRIPLE_FAULT_REASON];
+    u8 reason = d->arch.hvm.params[HVM_PARAM_TRIPLE_FAULT_REASON];
 
     gprintk(XENLOG_INFO,
             "Triple fault - invoking HVM shutdown action %d\n",
@@ -2046,7 +2039,7 @@ static bool_t domain_exit_uc_mode(struct vcpu *v)
 
 static void hvm_set_uc_mode(struct vcpu *v, bool_t is_in_uc_mode)
 {
-    v->domain->arch.hvm_domain.is_in_uc_mode = is_in_uc_mode;
+    v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
     shadow_blow_tables_per_domain(v->domain);
 }
 
@@ -2130,10 +2123,10 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
     if ( value & X86_CR0_CD )
     {
         /* Entering no fill cache mode. */
-        spin_lock(&v->domain->arch.hvm_domain.uc_lock);
+        spin_lock(&v->domain->arch.hvm.uc_lock);
         v->arch.hvm_vcpu.cache_mode = NO_FILL_CACHE_MODE;
 
-        if ( !v->domain->arch.hvm_domain.is_in_uc_mode )
+        if ( !v->domain->arch.hvm.is_in_uc_mode )
         {
             domain_pause_nosync(v->domain);
 
@@ -2143,19 +2136,19 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
 
             domain_unpause(v->domain);
         }
-        spin_unlock(&v->domain->arch.hvm_domain.uc_lock);
+        spin_unlock(&v->domain->arch.hvm.uc_lock);
     }
     else if ( !(value & X86_CR0_CD) &&
               (v->arch.hvm_vcpu.cache_mode == NO_FILL_CACHE_MODE) )
     {
         /* Exit from no fill cache mode. */
-        spin_lock(&v->domain->arch.hvm_domain.uc_lock);
+        spin_lock(&v->domain->arch.hvm.uc_lock);
         v->arch.hvm_vcpu.cache_mode = NORMAL_CACHE_MODE;
 
         if ( domain_exit_uc_mode(v) )
             hvm_set_uc_mode(v, 0);
 
-        spin_unlock(&v->domain->arch.hvm_domain.uc_lock);
+        spin_unlock(&v->domain->arch.hvm.uc_lock);
     }
 }
 
@@ -2597,9 +2590,9 @@ static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
             return NULL;
         }
         track->page = page;
-        spin_lock(&d->arch.hvm_domain.write_map.lock);
-        list_add_tail(&track->list, &d->arch.hvm_domain.write_map.list);
-        spin_unlock(&d->arch.hvm_domain.write_map.lock);
+        spin_lock(&d->arch.hvm.write_map.lock);
+        list_add_tail(&track->list, &d->arch.hvm.write_map.list);
+        spin_unlock(&d->arch.hvm.write_map.lock);
     }
 
     map = __map_domain_page_global(page);
@@ -2640,8 +2633,8 @@ void hvm_unmap_guest_frame(void *p, bool_t permanent)
         struct hvm_write_map *track;
 
         unmap_domain_page_global(p);
-        spin_lock(&d->arch.hvm_domain.write_map.lock);
-        list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
+        spin_lock(&d->arch.hvm.write_map.lock);
+        list_for_each_entry(track, &d->arch.hvm.write_map.list, list)
             if ( track->page == page )
             {
                 paging_mark_dirty(d, mfn);
@@ -2649,7 +2642,7 @@ void hvm_unmap_guest_frame(void *p, bool_t permanent)
                 xfree(track);
                 break;
             }
-        spin_unlock(&d->arch.hvm_domain.write_map.lock);
+        spin_unlock(&d->arch.hvm.write_map.lock);
     }
 
     put_page(page);
@@ -2659,10 +2652,10 @@ void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
 {
     struct hvm_write_map *track;
 
-    spin_lock(&d->arch.hvm_domain.write_map.lock);
-    list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
+    spin_lock(&d->arch.hvm.write_map.lock);
+    list_for_each_entry(track, &d->arch.hvm.write_map.list, list)
         paging_mark_dirty(d, page_to_mfn(track->page));
-    spin_unlock(&d->arch.hvm_domain.write_map.lock);
+    spin_unlock(&d->arch.hvm.write_map.lock);
 }
 
 static void *hvm_map_entry(unsigned long va, bool_t *writable)
@@ -3942,7 +3935,7 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
     v->arch.hvm_vcpu.cache_tsc_offset =
         v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
     hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
-                       d->arch.hvm_domain.sync_tsc);
+                       d->arch.hvm.sync_tsc);
 
     v->arch.hvm_vcpu.msr_tsc_adjust = 0;
 
@@ -3964,7 +3957,7 @@ static void hvm_s3_suspend(struct domain *d)
     domain_lock(d);
 
     if ( d->is_dying || (d->vcpu == NULL) || (d->vcpu[0] == NULL) ||
-         test_and_set_bool(d->arch.hvm_domain.is_s3_suspended) )
+         test_and_set_bool(d->arch.hvm.is_s3_suspended) )
     {
         domain_unlock(d);
         domain_unpause(d);
@@ -3994,7 +3987,7 @@ static void hvm_s3_suspend(struct domain *d)
 
 static void hvm_s3_resume(struct domain *d)
 {
-    if ( test_and_clear_bool(d->arch.hvm_domain.is_s3_suspended) )
+    if ( test_and_clear_bool(d->arch.hvm.is_s3_suspended) )
     {
         struct vcpu *v;
 
@@ -4074,7 +4067,7 @@ static int hvmop_set_evtchn_upcall_vector(
 static int hvm_allow_set_param(struct domain *d,
                                const struct xen_hvm_param *a)
 {
-    uint64_t value = d->arch.hvm_domain.params[a->index];
+    uint64_t value = d->arch.hvm.params[a->index];
     int rc;
 
     rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_set_param);
@@ -4177,7 +4170,7 @@ static int hvmop_set_param(
          */
         if ( !paging_mode_hap(d) || !cpu_has_vmx )
         {
-            d->arch.hvm_domain.params[a.index] = a.value;
+            d->arch.hvm.params[a.index] = a.value;
             break;
         }
 
@@ -4192,7 +4185,7 @@ static int hvmop_set_param(
 
         rc = 0;
         domain_pause(d);
-        d->arch.hvm_domain.params[a.index] = a.value;
+        d->arch.hvm.params[a.index] = a.value;
         for_each_vcpu ( d, v )
             paging_update_cr3(v, false);
         domain_unpause(d);
@@ -4241,11 +4234,11 @@ static int hvmop_set_param(
         if ( !paging_mode_hap(d) && a.value )
             rc = -EINVAL;
         if ( a.value &&
-             d->arch.hvm_domain.params[HVM_PARAM_ALTP2M] )
+             d->arch.hvm.params[HVM_PARAM_ALTP2M] )
             rc = -EINVAL;
         /* Set up NHVM state for any vcpus that are already up. */
         if ( a.value &&
-             !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
+             !d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
             for_each_vcpu(d, v)
                 if ( rc == 0 )
                     rc = nestedhvm_vcpu_initialise(v);
@@ -4260,7 +4253,7 @@ static int hvmop_set_param(
         if ( a.value > XEN_ALTP2M_limited )
             rc = -EINVAL;
         if ( a.value &&
-             d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
+             d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
             rc = -EINVAL;
         break;
     case HVM_PARAM_BUFIOREQ_EVTCHN:
@@ -4271,20 +4264,20 @@ static int hvmop_set_param(
             rc = -EINVAL;
         break;
     case HVM_PARAM_IOREQ_SERVER_PFN:
-        d->arch.hvm_domain.ioreq_gfn.base = a.value;
+        d->arch.hvm.ioreq_gfn.base = a.value;
         break;
     case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
     {
         unsigned int i;
 
         if ( a.value == 0 ||
-             a.value > sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8 )
+             a.value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
         {
             rc = -EINVAL;
             break;
         }
         for ( i = 0; i < a.value; i++ )
-            set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
+            set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
 
         break;
     }
@@ -4339,7 +4332,7 @@ static int hvmop_set_param(
     if ( rc != 0 )
         goto out;
 
-    d->arch.hvm_domain.params[a.index] = a.value;
+    d->arch.hvm.params[a.index] = a.value;
 
     HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %"PRIx64,
                 a.index, a.value);
@@ -4418,15 +4411,15 @@ static int hvmop_get_param(
     switch ( a.index )
     {
     case HVM_PARAM_ACPI_S_STATE:
-        a.value = d->arch.hvm_domain.is_s3_suspended ? 3 : 0;
+        a.value = d->arch.hvm.is_s3_suspended ? 3 : 0;
         break;
 
     case HVM_PARAM_VM86_TSS:
-        a.value = (uint32_t)d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED];
+        a.value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
         break;
 
     case HVM_PARAM_VM86_TSS_SIZED:
-        a.value = d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED] &
+        a.value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
                   ~VM86_TSS_UPDATED;
         break;
 
@@ -4453,7 +4446,7 @@ static int hvmop_get_param(
 
     /*FALLTHRU*/
     default:
-        a.value = d->arch.hvm_domain.params[a.index];
+        a.value = d->arch.hvm.params[a.index];
         break;
     }
 
@@ -4553,7 +4546,7 @@ static int do_altp2m_op(
         goto out;
     }
 
-    mode = d->arch.hvm_domain.params[HVM_PARAM_ALTP2M];
+    mode = d->arch.hvm.params[HVM_PARAM_ALTP2M];
 
     if ( XEN_ALTP2M_disabled == mode )
     {
diff --git a/xen/arch/x86/hvm/hypercall.c b/xen/arch/x86/hvm/hypercall.c
index 85eacd7..3d7ac49 100644
--- a/xen/arch/x86/hvm/hypercall.c
+++ b/xen/arch/x86/hvm/hypercall.c
@@ -41,7 +41,7 @@ static long hvm_memory_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         rc = compat_memory_op(cmd, arg);
 
     if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation )
-        curr->domain->arch.hvm_domain.qemu_mapcache_invalidate = true;
+        curr->domain->arch.hvm.qemu_mapcache_invalidate = true;
 
     return rc;
 }
@@ -286,8 +286,8 @@ int hvm_hypercall(struct cpu_user_regs *regs)
     if ( curr->hcall_preempted )
         return HVM_HCALL_preempted;
 
-    if ( unlikely(currd->arch.hvm_domain.qemu_mapcache_invalidate) &&
-         test_and_clear_bool(currd->arch.hvm_domain.qemu_mapcache_invalidate) )
+    if ( unlikely(currd->arch.hvm.qemu_mapcache_invalidate) &&
+         test_and_clear_bool(currd->arch.hvm.qemu_mapcache_invalidate) )
         send_invalidate_req();
 
     return HVM_HCALL_completed;
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 2bc156d..aac22c5 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -219,10 +219,10 @@ static const struct hvm_io_handler *hvm_find_io_handler(const ioreq_t *p)
     BUG_ON((p->type != IOREQ_TYPE_PIO) &&
            (p->type != IOREQ_TYPE_COPY));
 
-    for ( i = 0; i < curr_d->arch.hvm_domain.io_handler_count; i++ )
+    for ( i = 0; i < curr_d->arch.hvm.io_handler_count; i++ )
     {
         const struct hvm_io_handler *handler =
-            &curr_d->arch.hvm_domain.io_handler[i];
+            &curr_d->arch.hvm.io_handler[i];
         const struct hvm_io_ops *ops = handler->ops;
 
         if ( handler->type != p->type )
@@ -257,9 +257,9 @@ int hvm_io_intercept(ioreq_t *p)
 
 struct hvm_io_handler *hvm_next_io_handler(struct domain *d)
 {
-    unsigned int i = d->arch.hvm_domain.io_handler_count++;
+    unsigned int i = d->arch.hvm.io_handler_count++;
 
-    ASSERT(d->arch.hvm_domain.io_handler);
+    ASSERT(d->arch.hvm.io_handler);
 
     if ( i == NR_IO_HANDLERS )
     {
@@ -267,7 +267,7 @@ struct hvm_io_handler *hvm_next_io_handler(struct domain *d)
         return NULL;
     }
 
-    return &d->arch.hvm_domain.io_handler[i];
+    return &d->arch.hvm.io_handler[i];
 }
 
 void register_mmio_handler(struct domain *d,
@@ -303,10 +303,10 @@ void relocate_portio_handler(struct domain *d, unsigned int old_port,
 {
     unsigned int i;
 
-    for ( i = 0; i < d->arch.hvm_domain.io_handler_count; i++ )
+    for ( i = 0; i < d->arch.hvm.io_handler_count; i++ )
     {
         struct hvm_io_handler *handler =
-            &d->arch.hvm_domain.io_handler[i];
+            &d->arch.hvm.io_handler[i];
 
         if ( handler->type != IOREQ_TYPE_PIO )
             continue;
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index bf4d874..f1ea7d7 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -179,12 +179,12 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
                                 const ioreq_t *p)
 {
     struct vcpu *curr = current;
-    const struct hvm_domain *hvm_domain = &curr->domain->arch.hvm_domain;
+    const struct hvm_domain *hvm = &curr->domain->arch.hvm;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     struct g2m_ioport *g2m_ioport;
     unsigned int start, end;
 
-    list_for_each_entry( g2m_ioport, &hvm_domain->g2m_ioport_list, list )
+    list_for_each_entry( g2m_ioport, &hvm->g2m_ioport_list, list )
     {
         start = g2m_ioport->gport;
         end = start + g2m_ioport->np;
@@ -313,12 +313,12 @@ static int vpci_portio_read(const struct hvm_io_handler *handler,
     if ( addr == 0xcf8 )
     {
         ASSERT(size == 4);
-        *data = d->arch.hvm_domain.pci_cf8;
+        *data = d->arch.hvm.pci_cf8;
         return X86EMUL_OKAY;
     }
 
     ASSERT((addr & ~3) == 0xcfc);
-    cf8 = ACCESS_ONCE(d->arch.hvm_domain.pci_cf8);
+    cf8 = ACCESS_ONCE(d->arch.hvm.pci_cf8);
     if ( !CF8_ENABLED(cf8) )
         return X86EMUL_UNHANDLEABLE;
 
@@ -343,12 +343,12 @@ static int vpci_portio_write(const struct hvm_io_handler *handler,
     if ( addr == 0xcf8 )
     {
         ASSERT(size == 4);
-        d->arch.hvm_domain.pci_cf8 = data;
+        d->arch.hvm.pci_cf8 = data;
         return X86EMUL_OKAY;
     }
 
     ASSERT((addr & ~3) == 0xcfc);
-    cf8 = ACCESS_ONCE(d->arch.hvm_domain.pci_cf8);
+    cf8 = ACCESS_ONCE(d->arch.hvm.pci_cf8);
     if ( !CF8_ENABLED(cf8) )
         return X86EMUL_UNHANDLEABLE;
 
@@ -397,7 +397,7 @@ static const struct hvm_mmcfg *vpci_mmcfg_find(const struct domain *d,
 {
     const struct hvm_mmcfg *mmcfg;
 
-    list_for_each_entry ( mmcfg, &d->arch.hvm_domain.mmcfg_regions, next )
+    list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next )
         if ( addr >= mmcfg->addr && addr < mmcfg->addr + mmcfg->size )
             return mmcfg;
 
@@ -420,9 +420,9 @@ static int vpci_mmcfg_accept(struct vcpu *v, unsigned long addr)
     struct domain *d = v->domain;
     bool found;
 
-    read_lock(&d->arch.hvm_domain.mmcfg_lock);
+    read_lock(&d->arch.hvm.mmcfg_lock);
     found = vpci_mmcfg_find(d, addr);
-    read_unlock(&d->arch.hvm_domain.mmcfg_lock);
+    read_unlock(&d->arch.hvm.mmcfg_lock);
 
     return found;
 }
@@ -437,16 +437,16 @@ static int vpci_mmcfg_read(struct vcpu *v, unsigned long addr,
 
     *data = ~0ul;
 
-    read_lock(&d->arch.hvm_domain.mmcfg_lock);
+    read_lock(&d->arch.hvm.mmcfg_lock);
     mmcfg = vpci_mmcfg_find(d, addr);
     if ( !mmcfg )
     {
-        read_unlock(&d->arch.hvm_domain.mmcfg_lock);
+        read_unlock(&d->arch.hvm.mmcfg_lock);
         return X86EMUL_RETRY;
     }
 
     reg = vpci_mmcfg_decode_addr(mmcfg, addr, &sbdf);
-    read_unlock(&d->arch.hvm_domain.mmcfg_lock);
+    read_unlock(&d->arch.hvm.mmcfg_lock);
 
     if ( !vpci_access_allowed(reg, len) ||
          (reg + len) > PCI_CFG_SPACE_EXP_SIZE )
@@ -479,16 +479,16 @@ static int vpci_mmcfg_write(struct vcpu *v, unsigned long addr,
     unsigned int reg;
     pci_sbdf_t sbdf;
 
-    read_lock(&d->arch.hvm_domain.mmcfg_lock);
+    read_lock(&d->arch.hvm.mmcfg_lock);
     mmcfg = vpci_mmcfg_find(d, addr);
     if ( !mmcfg )
     {
-        read_unlock(&d->arch.hvm_domain.mmcfg_lock);
+        read_unlock(&d->arch.hvm.mmcfg_lock);
         return X86EMUL_RETRY;
     }
 
     reg = vpci_mmcfg_decode_addr(mmcfg, addr, &sbdf);
-    read_unlock(&d->arch.hvm_domain.mmcfg_lock);
+    read_unlock(&d->arch.hvm.mmcfg_lock);
 
     if ( !vpci_access_allowed(reg, len) ||
          (reg + len) > PCI_CFG_SPACE_EXP_SIZE )
@@ -527,8 +527,8 @@ int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr,
     new->segment = seg;
     new->size = (end_bus - start_bus + 1) << 20;
 
-    write_lock(&d->arch.hvm_domain.mmcfg_lock);
-    list_for_each_entry ( mmcfg, &d->arch.hvm_domain.mmcfg_regions, next )
+    write_lock(&d->arch.hvm.mmcfg_lock);
+    list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next )
         if ( new->addr < mmcfg->addr + mmcfg->size &&
              mmcfg->addr < new->addr + new->size )
         {
@@ -539,25 +539,25 @@ int register_vpci_mmcfg_handler(struct domain *d, paddr_t addr,
                  new->segment == mmcfg->segment &&
                  new->size == mmcfg->size )
                 ret = 0;
-            write_unlock(&d->arch.hvm_domain.mmcfg_lock);
+            write_unlock(&d->arch.hvm.mmcfg_lock);
             xfree(new);
             return ret;
         }
 
-    if ( list_empty(&d->arch.hvm_domain.mmcfg_regions) )
+    if ( list_empty(&d->arch.hvm.mmcfg_regions) )
         register_mmio_handler(d, &vpci_mmcfg_ops);
 
-    list_add(&new->next, &d->arch.hvm_domain.mmcfg_regions);
-    write_unlock(&d->arch.hvm_domain.mmcfg_lock);
+    list_add(&new->next, &d->arch.hvm.mmcfg_regions);
+    write_unlock(&d->arch.hvm.mmcfg_lock);
 
     return 0;
 }
 
 void destroy_vpci_mmcfg(struct domain *d)
 {
-    struct list_head *mmcfg_regions = &d->arch.hvm_domain.mmcfg_regions;
+    struct list_head *mmcfg_regions = &d->arch.hvm.mmcfg_regions;
 
-    write_lock(&d->arch.hvm_domain.mmcfg_lock);
+    write_lock(&d->arch.hvm.mmcfg_lock);
     while ( !list_empty(mmcfg_regions) )
     {
         struct hvm_mmcfg *mmcfg = list_first_entry(mmcfg_regions,
@@ -566,7 +566,7 @@ void destroy_vpci_mmcfg(struct domain *d)
         list_del(&mmcfg->next);
         xfree(mmcfg);
     }
-    write_unlock(&d->arch.hvm_domain.mmcfg_lock);
+    write_unlock(&d->arch.hvm.mmcfg_lock);
 }
 
 /*
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 940a2c9..8d60b02 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -38,13 +38,13 @@ static void set_ioreq_server(struct domain *d, unsigned int id,
                              struct hvm_ioreq_server *s)
 {
     ASSERT(id < MAX_NR_IOREQ_SERVERS);
-    ASSERT(!s || !d->arch.hvm_domain.ioreq_server.server[id]);
+    ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);
 
-    d->arch.hvm_domain.ioreq_server.server[id] = s;
+    d->arch.hvm.ioreq_server.server[id] = s;
 }
 
 #define GET_IOREQ_SERVER(d, id) \
-    (d)->arch.hvm_domain.ioreq_server.server[id]
+    (d)->arch.hvm.ioreq_server.server[id]
 
 static struct hvm_ioreq_server *get_ioreq_server(const struct domain *d,
                                                  unsigned int id)
@@ -247,10 +247,10 @@ static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
 
     ASSERT(!IS_DEFAULT(s));
 
-    for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
+    for ( i = 0; i < sizeof(d->arch.hvm.ioreq_gfn.mask) * 8; i++ )
     {
-        if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
-            return _gfn(d->arch.hvm_domain.ioreq_gfn.base + i);
+        if ( test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.mask) )
+            return _gfn(d->arch.hvm.ioreq_gfn.base + i);
     }
 
     return INVALID_GFN;
@@ -259,12 +259,12 @@ static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
 static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
 {
     struct domain *d = s->target;
-    unsigned int i = gfn_x(gfn) - d->arch.hvm_domain.ioreq_gfn.base;
+    unsigned int i = gfn_x(gfn) - d->arch.hvm.ioreq_gfn.base;
 
     ASSERT(!IS_DEFAULT(s));
     ASSERT(!gfn_eq(gfn, INVALID_GFN));
 
-    set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
+    set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
 }
 
 static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
@@ -307,8 +307,8 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
 
     if ( IS_DEFAULT(s) )
         iorp->gfn = _gfn(buf ?
-                         d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
-                         d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN]);
+                         d->arch.hvm.params[HVM_PARAM_BUFIOREQ_PFN] :
+                         d->arch.hvm.params[HVM_PARAM_IOREQ_PFN]);
     else
         iorp->gfn = hvm_alloc_ioreq_gfn(s);
 
@@ -394,7 +394,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
     unsigned int id;
     bool found = false;
 
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
@@ -405,7 +405,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
         }
     }
 
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     return found;
 }
@@ -492,7 +492,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
 
         s->bufioreq_evtchn = rc;
         if ( IS_DEFAULT(s) )
-            d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
+            d->arch.hvm.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
                 s->bufioreq_evtchn;
     }
 
@@ -797,7 +797,7 @@ int hvm_create_ioreq_server(struct domain *d, bool is_default,
         return -ENOMEM;
 
     domain_pause(d);
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     if ( is_default )
     {
@@ -841,13 +841,13 @@ int hvm_create_ioreq_server(struct domain *d, bool is_default,
     if ( id )
         *id = i;
 
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
     domain_unpause(d);
 
     return 0;
 
  fail:
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
     domain_unpause(d);
 
     xfree(s);
@@ -862,7 +862,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     if ( id == DEFAULT_IOSERVID )
         return -EPERM;
 
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -898,7 +898,7 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     return rc;
 }
@@ -914,7 +914,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     if ( id == DEFAULT_IOSERVID )
         return -EOPNOTSUPP;
 
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -950,7 +950,7 @@ int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     return rc;
 }
@@ -967,7 +967,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
     if ( !is_hvm_domain(d) )
         return -EINVAL;
 
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -1007,7 +1007,7 @@ int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
     }
 
  out:
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     return rc;
 }
@@ -1026,7 +1026,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( id == DEFAULT_IOSERVID )
         return -EOPNOTSUPP;
 
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -1064,7 +1064,7 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     rc = rangeset_add_range(r, start, end);
 
  out:
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     return rc;
 }
@@ -1083,7 +1083,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     if ( id == DEFAULT_IOSERVID )
         return -EOPNOTSUPP;
 
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -1121,7 +1121,7 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     rc = rangeset_remove_range(r, start, end);
 
  out:
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     return rc;
 }
@@ -1149,7 +1149,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
         return -EINVAL;
 
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -1166,7 +1166,7 @@ int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
     rc = p2m_set_ioreq_server(d, flags, s);
 
  out:
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     if ( rc == 0 && flags == 0 )
     {
@@ -1188,7 +1188,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     if ( id == DEFAULT_IOSERVID )
         return -EOPNOTSUPP;
 
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -1214,7 +1214,7 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
     return rc;
 }
 
@@ -1224,7 +1224,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
     unsigned int id;
     int rc;
 
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
@@ -1233,7 +1233,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
             goto fail;
     }
 
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     return 0;
 
@@ -1248,7 +1248,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
         hvm_ioreq_server_remove_vcpu(s, v);
     }
 
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     return rc;
 }
@@ -1258,12 +1258,12 @@ void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
     struct hvm_ioreq_server *s;
     unsigned int id;
 
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
         hvm_ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 }
 
 void hvm_destroy_all_ioreq_servers(struct domain *d)
@@ -1271,7 +1271,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
     struct hvm_ioreq_server *s;
     unsigned int id;
 
-    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     /* No need to domain_pause() as the domain is being torn down */
 
@@ -1291,7 +1291,7 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
         xfree(s);
     }
 
-    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
 }
 
 struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
@@ -1306,7 +1306,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
     if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
         return GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
 
-    cf8 = d->arch.hvm_domain.pci_cf8;
+    cf8 = d->arch.hvm.pci_cf8;
 
     if ( p->type == IOREQ_TYPE_PIO &&
          (p->addr & ~3) == 0xcfc &&
@@ -1564,7 +1564,7 @@ static int hvm_access_cf8(
     struct domain *d = current->domain;
 
     if ( dir == IOREQ_WRITE && bytes == 4 )
-        d->arch.hvm_domain.pci_cf8 = *val;
+        d->arch.hvm.pci_cf8 = *val;
 
     /* We always need to fall through to the catch all emulator */
     return X86EMUL_UNHANDLEABLE;
@@ -1572,7 +1572,7 @@ static int hvm_access_cf8(
 
 void hvm_ioreq_init(struct domain *d)
 {
-    spin_lock_init(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_init(&d->arch.hvm.ioreq_server.lock);
 
     register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
 }
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index dfe8ed6..1ded2c2 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -52,11 +52,11 @@ int hvm_ioapic_assert(struct domain *d, unsigned int gsi, bool level)
         return -1;
     }
 
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
     if ( !level || hvm_irq->gsi_assert_count[gsi]++ == 0 )
         assert_gsi(d, gsi);
     vector = vioapic_get_vector(d, gsi);
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 
     return vector;
 }
@@ -71,9 +71,9 @@ void hvm_ioapic_deassert(struct domain *d, unsigned int gsi)
         return;
     }
 
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
     hvm_irq->gsi_assert_count[gsi]--;
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 }
 
 static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
@@ -122,9 +122,9 @@ static void __hvm_pci_intx_assert(
 void hvm_pci_intx_assert(
     struct domain *d, unsigned int device, unsigned int intx)
 {
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
     __hvm_pci_intx_assert(d, device, intx);
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 }
 
 static void __hvm_pci_intx_deassert(
@@ -156,9 +156,9 @@ static void __hvm_pci_intx_deassert(
 void hvm_pci_intx_deassert(
     struct domain *d, unsigned int device, unsigned int intx)
 {
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
     __hvm_pci_intx_deassert(d, device, intx);
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 }
 
 void hvm_gsi_assert(struct domain *d, unsigned int gsi)
@@ -179,13 +179,13 @@ void hvm_gsi_assert(struct domain *d, unsigned int gsi)
      * for the hardware domain, Xen needs to rely on gsi_assert_count in order
      * to know if the GSI is pending or not.
      */
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
     if ( !hvm_irq->gsi_assert_count[gsi] )
     {
         hvm_irq->gsi_assert_count[gsi] = 1;
         assert_gsi(d, gsi);
     }
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 }
 
 void hvm_gsi_deassert(struct domain *d, unsigned int gsi)
@@ -198,9 +198,9 @@ void hvm_gsi_deassert(struct domain *d, unsigned int gsi)
         return;
     }
 
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
     hvm_irq->gsi_assert_count[gsi] = 0;
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 }
 
 int hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq,
@@ -213,7 +213,7 @@ int hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq,
 
     ASSERT(isa_irq <= 15);
 
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
 
     if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
          (hvm_irq->gsi_assert_count[gsi]++ == 0) )
@@ -222,7 +222,7 @@ int hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq,
     if ( get_vector )
         vector = get_vector(d, gsi);
 
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 
     return vector;
 }
@@ -235,13 +235,13 @@ void hvm_isa_irq_deassert(
 
     ASSERT(isa_irq <= 15);
 
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
 
     if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
          (--hvm_irq->gsi_assert_count[gsi] == 0) )
         deassert_irq(d, isa_irq);
 
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 }
 
 static void hvm_set_callback_irq_level(struct vcpu *v)
@@ -252,7 +252,7 @@ static void hvm_set_callback_irq_level(struct vcpu *v)
 
     ASSERT(v->vcpu_id == 0);
 
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
 
     /* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. */
     asserted = !!vcpu_info(v, evtchn_upcall_pending);
@@ -289,7 +289,7 @@ static void hvm_set_callback_irq_level(struct vcpu *v)
     }
 
  out:
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 }
 
 void hvm_maybe_deassert_evtchn_irq(void)
@@ -331,7 +331,7 @@ int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
     if ( (link > 3) || (isa_irq > 15) )
         return -EINVAL;
 
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
 
     old_isa_irq = hvm_irq->pci_link.route[link];
     if ( old_isa_irq == isa_irq )
@@ -363,7 +363,7 @@ int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
     }
 
  out:
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 
     dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
             d->domain_id, link, old_isa_irq, isa_irq);
@@ -431,7 +431,7 @@ void hvm_set_callback_via(struct domain *d, uint64_t via)
          (!has_vlapic(d) || !has_vioapic(d) || !has_vpic(d)) )
         return;
 
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
 
     /* Tear down old callback via. */
     if ( hvm_irq->callback_via_asserted )
@@ -481,7 +481,7 @@ void hvm_set_callback_via(struct domain *d, uint64_t via)
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 
     for_each_vcpu ( d, v )
         if ( is_vcpu_online(v) )
@@ -509,7 +509,7 @@ void hvm_set_callback_via(struct domain *d, uint64_t via)
 
 struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
 {
-    struct hvm_domain *plat = &v->domain->arch.hvm_domain;
+    struct hvm_domain *plat = &v->domain->arch.hvm;
     int vector;
 
     if ( unlikely(v->nmi_pending) )
@@ -645,7 +645,7 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
     unsigned int asserted, pdev, pintx;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
 
     pdev  = hvm_irq->callback_via.pci.dev;
     pintx = hvm_irq->callback_via.pci.intx;
@@ -666,7 +666,7 @@ static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
     if ( asserted )
         __hvm_pci_intx_assert(d, pdev, pintx);    
 
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 
     return rc;
 }
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index edfe5cd..8a772bc 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -539,12 +539,12 @@ static DEFINE_RCU_READ_LOCK(pinned_cacheattr_rcu_lock);
 
 void hvm_init_cacheattr_region_list(struct domain *d)
 {
-    INIT_LIST_HEAD(&d->arch.hvm_domain.pinned_cacheattr_ranges);
+    INIT_LIST_HEAD(&d->arch.hvm.pinned_cacheattr_ranges);
 }
 
 void hvm_destroy_cacheattr_region_list(struct domain *d)
 {
-    struct list_head *head = &d->arch.hvm_domain.pinned_cacheattr_ranges;
+    struct list_head *head = &d->arch.hvm.pinned_cacheattr_ranges;
     struct hvm_mem_pinned_cacheattr_range *range;
 
     while ( !list_empty(head) )
@@ -568,7 +568,7 @@ int hvm_get_mem_pinned_cacheattr(struct domain *d, gfn_t gfn,
 
     rcu_read_lock(&pinned_cacheattr_rcu_lock);
     list_for_each_entry_rcu ( range,
-                              &d->arch.hvm_domain.pinned_cacheattr_ranges,
+                              &d->arch.hvm.pinned_cacheattr_ranges,
                               list )
     {
         if ( ((gfn_x(gfn) & mask) >= range->start) &&
@@ -612,7 +612,7 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
         /* Remove the requested range. */
         rcu_read_lock(&pinned_cacheattr_rcu_lock);
         list_for_each_entry_rcu ( range,
-                                  &d->arch.hvm_domain.pinned_cacheattr_ranges,
+                                  &d->arch.hvm.pinned_cacheattr_ranges,
                                   list )
             if ( range->start == gfn_start && range->end == gfn_end )
             {
@@ -655,7 +655,7 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
 
     rcu_read_lock(&pinned_cacheattr_rcu_lock);
     list_for_each_entry_rcu ( range,
-                              &d->arch.hvm_domain.pinned_cacheattr_ranges,
+                              &d->arch.hvm.pinned_cacheattr_ranges,
                               list )
     {
         if ( range->start == gfn_start && range->end == gfn_end )
@@ -682,7 +682,7 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
     range->end = gfn_end;
     range->type = type;
 
-    list_add_rcu(&range->list, &d->arch.hvm_domain.pinned_cacheattr_ranges);
+    list_add_rcu(&range->list, &d->arch.hvm.pinned_cacheattr_ranges);
     p2m_memory_type_changed(d);
     if ( type != PAT_TYPE_WRBACK )
         flush_all(FLUSH_CACHE);
@@ -827,7 +827,7 @@ int epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
 
     if ( direct_mmio )
     {
-        if ( (mfn_x(mfn) ^ d->arch.hvm_domain.vmx.apic_access_mfn) >> order )
+        if ( (mfn_x(mfn) ^ d->arch.hvm.vmx.apic_access_mfn) >> order )
             return MTRR_TYPE_UNCACHABLE;
         if ( order )
             return -1;
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 435647f..75b9408 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -56,7 +56,7 @@
 /* Dispatch SCIs based on the PM1a_STS and PM1a_EN registers */
 static void pmt_update_sci(PMTState *s)
 {
-    struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm_domain.acpi;
+    struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm.acpi;
 
     ASSERT(spin_is_locked(&s->lock));
 
@@ -68,26 +68,26 @@ static void pmt_update_sci(PMTState *s)
 
 void hvm_acpi_power_button(struct domain *d)
 {
-    PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
+    PMTState *s = &d->arch.hvm.pl_time->vpmt;
 
     if ( !has_vpm(d) )
         return;
 
     spin_lock(&s->lock);
-    d->arch.hvm_domain.acpi.pm1a_sts |= PWRBTN_STS;
+    d->arch.hvm.acpi.pm1a_sts |= PWRBTN_STS;
     pmt_update_sci(s);
     spin_unlock(&s->lock);
 }
 
 void hvm_acpi_sleep_button(struct domain *d)
 {
-    PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
+    PMTState *s = &d->arch.hvm.pl_time->vpmt;
 
     if ( !has_vpm(d) )
         return;
 
     spin_lock(&s->lock);
-    d->arch.hvm_domain.acpi.pm1a_sts |= SLPBTN_STS;
+    d->arch.hvm.acpi.pm1a_sts |= SLPBTN_STS;
     pmt_update_sci(s);
     spin_unlock(&s->lock);
 }
@@ -97,7 +97,7 @@ void hvm_acpi_sleep_button(struct domain *d)
 static void pmt_update_time(PMTState *s)
 {
     uint64_t curr_gtime, tmp;
-    struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm_domain.acpi;
+    struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm.acpi;
     uint32_t tmr_val = acpi->tmr_val, msb = tmr_val & TMR_VAL_MSB;
     
     ASSERT(spin_is_locked(&s->lock));
@@ -137,7 +137,7 @@ static void pmt_timer_callback(void *opaque)
 
     /* How close are we to the next MSB flip? */
     pmt_cycles_until_flip = TMR_VAL_MSB -
-        (s->vcpu->domain->arch.hvm_domain.acpi.tmr_val & (TMR_VAL_MSB - 1));
+        (s->vcpu->domain->arch.hvm.acpi.tmr_val & (TMR_VAL_MSB - 1));
 
     /* Overall time between MSB flips */
     time_until_flip = (1000000000ULL << 23) / FREQUENCE_PMTIMER;
@@ -156,13 +156,13 @@ static int handle_evt_io(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
     struct vcpu *v = current;
-    struct hvm_hw_acpi *acpi = &v->domain->arch.hvm_domain.acpi;
-    PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
+    struct hvm_hw_acpi *acpi = &v->domain->arch.hvm.acpi;
+    PMTState *s = &v->domain->arch.hvm.pl_time->vpmt;
     uint32_t addr, data, byte;
     int i;
 
     addr = port -
-        ((v->domain->arch.hvm_domain.params[
+        ((v->domain->arch.hvm.params[
             HVM_PARAM_ACPI_IOPORTS_LOCATION] == 0) ?
          PM1a_STS_ADDR_V0 : PM1a_STS_ADDR_V1);
 
@@ -220,8 +220,8 @@ static int handle_pmt_io(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
     struct vcpu *v = current;
-    struct hvm_hw_acpi *acpi = &v->domain->arch.hvm_domain.acpi;
-    PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
+    struct hvm_hw_acpi *acpi = &v->domain->arch.hvm.acpi;
+    PMTState *s = &v->domain->arch.hvm.pl_time->vpmt;
 
     if ( bytes != 4 || dir != IOREQ_READ )
     {
@@ -251,8 +251,8 @@ static int handle_pmt_io(
 
 static int acpi_save(struct domain *d, hvm_domain_context_t *h)
 {
-    struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
-    PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
+    struct hvm_hw_acpi *acpi = &d->arch.hvm.acpi;
+    PMTState *s = &d->arch.hvm.pl_time->vpmt;
     uint32_t x, msb = acpi->tmr_val & TMR_VAL_MSB;
     int rc;
 
@@ -282,8 +282,8 @@ static int acpi_save(struct domain *d, hvm_domain_context_t *h)
 
 static int acpi_load(struct domain *d, hvm_domain_context_t *h)
 {
-    struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
-    PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
+    struct hvm_hw_acpi *acpi = &d->arch.hvm.acpi;
+    PMTState *s = &d->arch.hvm.pl_time->vpmt;
 
     if ( !has_vpm(d) )
         return -ENODEV;
@@ -320,7 +320,7 @@ int pmtimer_change_ioport(struct domain *d, unsigned int version)
         return -ENODEV;
 
     /* Check that version is changing. */
-    old_version = d->arch.hvm_domain.params[HVM_PARAM_ACPI_IOPORTS_LOCATION];
+    old_version = d->arch.hvm.params[HVM_PARAM_ACPI_IOPORTS_LOCATION];
     if ( version == old_version )
         return 0;
 
@@ -346,7 +346,7 @@ int pmtimer_change_ioport(struct domain *d, unsigned int version)
 
 void pmtimer_init(struct vcpu *v)
 {
-    PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
+    PMTState *s = &v->domain->arch.hvm.pl_time->vpmt;
 
     if ( !has_vpm(v->domain) )
         return;
@@ -370,7 +370,7 @@ void pmtimer_init(struct vcpu *v)
 
 void pmtimer_deinit(struct domain *d)
 {
-    PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
+    PMTState *s = &d->arch.hvm.pl_time->vpmt;
 
     if ( !has_vpm(d) )
         return;
@@ -384,7 +384,7 @@ void pmtimer_reset(struct domain *d)
         return;
 
     /* Reset the counter. */
-    d->arch.hvm_domain.acpi.tmr_val = 0;
+    d->arch.hvm.acpi.tmr_val = 0;
 }
 
 /*
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index 96921bb..1828587 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -38,7 +38,7 @@
 #define MIN_PER_HOUR    60
 #define HOUR_PER_DAY    24
 
-#define domain_vrtc(x) (&(x)->arch.hvm_domain.pl_time->vrtc)
+#define domain_vrtc(x) (&(x)->arch.hvm.pl_time->vrtc)
 #define vcpu_vrtc(x)   (domain_vrtc((x)->domain))
 #define vrtc_domain(x) (container_of(x, struct pl_time, vrtc)->domain)
 #define vrtc_vcpu(x)   (pt_global_vcpu_target(vrtc_domain(x)))
@@ -148,7 +148,7 @@ static void rtc_timer_update(RTCState *s)
                 s_time_t now = NOW();
 
                 s->period = period;
-                if ( v->domain->arch.hvm_domain.params[HVM_PARAM_VPT_ALIGN] )
+                if ( v->domain->arch.hvm.params[HVM_PARAM_VPT_ALIGN] )
                     delta = 0;
                 else
                     delta = period - ((now - s->start_time) % period);
diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c
index d2dc430..0ace160 100644
--- a/xen/arch/x86/hvm/save.c
+++ b/xen/arch/x86/hvm/save.c
@@ -39,7 +39,7 @@ void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr)
     hdr->gtsc_khz = d->arch.tsc_khz;
 
     /* Time when saving started */
-    d->arch.hvm_domain.sync_tsc = rdtsc();
+    d->arch.hvm.sync_tsc = rdtsc();
 }
 
 int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
@@ -74,10 +74,10 @@ int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
         hvm_set_rdtsc_exiting(d, 1);
 
     /* Time when restore started  */
-    d->arch.hvm_domain.sync_tsc = rdtsc();
+    d->arch.hvm.sync_tsc = rdtsc();
 
     /* VGA state is not saved/restored, so we nobble the cache. */
-    d->arch.hvm_domain.stdvga.cache = STDVGA_CACHE_DISABLED;
+    d->arch.hvm.stdvga.cache = STDVGA_CACHE_DISABLED;
 
     return 0;
 }
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index 925bab2..bd398db 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -134,7 +134,7 @@ static bool_t stdvga_cache_is_enabled(const struct hvm_hw_stdvga *s)
 
 static int stdvga_outb(uint64_t addr, uint8_t val)
 {
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
     int rc = 1, prev_stdvga = s->stdvga;
 
     switch ( addr )
@@ -202,7 +202,7 @@ static void stdvga_out(uint32_t port, uint32_t bytes, uint32_t val)
 static int stdvga_intercept_pio(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
 
     if ( dir == IOREQ_WRITE )
     {
@@ -252,7 +252,7 @@ static unsigned int stdvga_mem_offset(
 
 static uint8_t stdvga_mem_readb(uint64_t addr)
 {
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
     int plane;
     uint32_t ret, *vram_l;
     uint8_t *vram_b;
@@ -347,7 +347,7 @@ static int stdvga_mem_read(const struct hvm_io_handler *handler,
 
 static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
 {
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
     int plane, write_mode, b, func_select, mask;
     uint32_t write_mask, bit_mask, set_mask, *vram_l;
     uint8_t *vram_b;
@@ -457,7 +457,7 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
                             uint64_t addr, uint32_t size,
                             uint64_t data)
 {
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
     ioreq_t p = {
         .type = IOREQ_TYPE_COPY,
         .addr = addr,
@@ -517,7 +517,7 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
 static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
                                 const ioreq_t *p)
 {
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
 
     /*
      * The range check must be done without taking the lock, to avoid
@@ -560,7 +560,7 @@ static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
 
 static void stdvga_mem_complete(const struct hvm_io_handler *handler)
 {
-    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
 
     spin_unlock(&s->lock);
 }
@@ -574,7 +574,7 @@ static const struct hvm_io_ops stdvga_mem_ops = {
 
 void stdvga_init(struct domain *d)
 {
-    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
+    struct hvm_hw_stdvga *s = &d->arch.hvm.stdvga;
     struct page_info *pg;
     unsigned int i;
 
@@ -615,7 +615,7 @@ void stdvga_init(struct domain *d)
 
 void stdvga_deinit(struct domain *d)
 {
-    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
+    struct hvm_hw_stdvga *s = &d->arch.hvm.stdvga;
     int i;
 
     if ( !has_vvga(d) )
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a16f372..2d52247 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1197,7 +1197,7 @@ void svm_vmenter_helper(const struct cpu_user_regs *regs)
 
 static void svm_guest_osvw_init(struct domain *d)
 {
-    struct svm_domain *svm = &d->arch.hvm_domain.svm;
+    struct svm_domain *svm = &d->arch.hvm.svm;
 
     spin_lock(&osvw_lock);
 
@@ -2006,8 +2006,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     case MSR_AMD_OSVW_STATUS:
         if ( !d->arch.cpuid->extd.osvw )
             goto gpf;
-        *msr_content =
-            d->arch.hvm_domain.svm.osvw.raw[msr - MSR_AMD_OSVW_ID_LENGTH];
+        *msr_content = d->arch.hvm.svm.osvw.raw[msr - MSR_AMD_OSVW_ID_LENGTH];
         break;
 
     default:
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 04518fd..d31fcfa 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -106,7 +106,7 @@ static int construct_vmcb(struct vcpu *v)
         svm_disable_intercept_for_msr(v, MSR_AMD64_LWP_CBADDR);
 
     vmcb->_msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
-    vmcb->_iopm_base_pa = __pa(v->domain->arch.hvm_domain.io_bitmap);
+    vmcb->_iopm_base_pa = __pa(v->domain->arch.hvm.io_bitmap);
 
     /* Virtualise EFLAGS.IF and LAPIC TPR (CR8). */
     vmcb->_vintr.fields.intr_masking = 1;
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 97b419f..9675424 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -49,7 +49,7 @@ static struct hvm_vioapic *addr_vioapic(const struct domain *d,
 {
     unsigned int i;
 
-    for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
+    for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
     {
         struct hvm_vioapic *vioapic = domain_vioapic(d, i);
 
@@ -66,7 +66,7 @@ static struct hvm_vioapic *gsi_vioapic(const struct domain *d,
 {
     unsigned int i;
 
-    for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
+    for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
     {
         struct hvm_vioapic *vioapic = domain_vioapic(d, i);
 
@@ -214,7 +214,7 @@ static void vioapic_write_redirent(
     int unmasked = 0;
     unsigned int gsi = vioapic->base_gsi + idx;
 
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
 
     pent = &vioapic->redirtbl[idx];
     ent  = *pent;
@@ -264,7 +264,7 @@ static void vioapic_write_redirent(
         vioapic_deliver(vioapic, idx);
     }
 
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 
     if ( gsi == 0 || unmasked )
         pt_may_unmask_irq(d, NULL);
@@ -388,7 +388,7 @@ static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
     struct vcpu *v;
     unsigned int irq = vioapic->base_gsi + pin;
 
-    ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));
+    ASSERT(spin_is_locked(&d->arch.hvm.irq_lock));
 
     HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
                 "dest=%x dest_mode=%x delivery_mode=%x "
@@ -476,7 +476,7 @@ void vioapic_irq_positive_edge(struct domain *d, unsigned int irq)
     HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %x", irq);
 
     ASSERT(pin < vioapic->nr_pins);
-    ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));
+    ASSERT(spin_is_locked(&d->arch.hvm.irq_lock));
 
     ent = &vioapic->redirtbl[pin];
     if ( ent->fields.mask )
@@ -501,9 +501,9 @@ void vioapic_update_EOI(struct domain *d, u8 vector)
 
     ASSERT(has_vioapic(d));
 
-    spin_lock(&d->arch.hvm_domain.irq_lock);
+    spin_lock(&d->arch.hvm.irq_lock);
 
-    for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
+    for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
     {
         struct hvm_vioapic *vioapic = domain_vioapic(d, i);
         unsigned int pin;
@@ -518,9 +518,9 @@ void vioapic_update_EOI(struct domain *d, u8 vector)
 
             if ( iommu_enabled )
             {
-                spin_unlock(&d->arch.hvm_domain.irq_lock);
+                spin_unlock(&d->arch.hvm.irq_lock);
                 hvm_dpci_eoi(d, vioapic->base_gsi + pin, ent);
-                spin_lock(&d->arch.hvm_domain.irq_lock);
+                spin_lock(&d->arch.hvm.irq_lock);
             }
 
             if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
@@ -533,7 +533,7 @@ void vioapic_update_EOI(struct domain *d, u8 vector)
         }
     }
 
-    spin_unlock(&d->arch.hvm_domain.irq_lock);
+    spin_unlock(&d->arch.hvm.irq_lock);
 }
 
 int vioapic_get_mask(const struct domain *d, unsigned int gsi)
@@ -579,7 +579,7 @@ static int ioapic_save(struct domain *d, hvm_domain_context_t *h)
     s = domain_vioapic(d, 0);
 
     if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) ||
-         d->arch.hvm_domain.nr_vioapics != 1 )
+         d->arch.hvm.nr_vioapics != 1 )
         return -EOPNOTSUPP;
 
     return hvm_save_entry(IOAPIC, 0, h, &s->domU);
@@ -595,7 +595,7 @@ static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
     s = domain_vioapic(d, 0);
 
     if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) ||
-         d->arch.hvm_domain.nr_vioapics != 1 )
+         d->arch.hvm.nr_vioapics != 1 )
         return -EOPNOTSUPP;
 
     return hvm_load_entry(IOAPIC, h, &s->domU);
@@ -609,11 +609,11 @@ void vioapic_reset(struct domain *d)
 
     if ( !has_vioapic(d) )
     {
-        ASSERT(!d->arch.hvm_domain.nr_vioapics);
+        ASSERT(!d->arch.hvm.nr_vioapics);
         return;
     }
 
-    for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
+    for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
     {
         struct hvm_vioapic *vioapic = domain_vioapic(d, i);
         unsigned int nr_pins = vioapic->nr_pins, base_gsi = vioapic->base_gsi;
@@ -646,7 +646,7 @@ static void vioapic_free(const struct domain *d, unsigned int nr_vioapics)
 
     for ( i = 0; i < nr_vioapics; i++)
         xfree(domain_vioapic(d, i));
-    xfree(d->arch.hvm_domain.vioapic);
+    xfree(d->arch.hvm.vioapic);
 }
 
 int vioapic_init(struct domain *d)
@@ -655,14 +655,14 @@ int vioapic_init(struct domain *d)
 
     if ( !has_vioapic(d) )
     {
-        ASSERT(!d->arch.hvm_domain.nr_vioapics);
+        ASSERT(!d->arch.hvm.nr_vioapics);
         return 0;
     }
 
     nr_vioapics = is_hardware_domain(d) ? nr_ioapics : 1;
 
-    if ( (d->arch.hvm_domain.vioapic == NULL) &&
-         ((d->arch.hvm_domain.vioapic =
+    if ( (d->arch.hvm.vioapic == NULL) &&
+         ((d->arch.hvm.vioapic =
            xzalloc_array(struct hvm_vioapic *, nr_vioapics)) == NULL) )
         return -ENOMEM;
 
@@ -699,7 +699,7 @@ int vioapic_init(struct domain *d)
      */
     ASSERT(hvm_domain_irq(d)->nr_gsis >= nr_gsis);
 
-    d->arch.hvm_domain.nr_vioapics = nr_vioapics;
+    d->arch.hvm.nr_vioapics = nr_vioapics;
     vioapic_reset(d);
 
     register_mmio_handler(d, &vioapic_mmio_ops);
@@ -711,9 +711,9 @@ void vioapic_deinit(struct domain *d)
 {
     if ( !has_vioapic(d) )
     {
-        ASSERT(!d->arch.hvm_domain.nr_vioapics);
+        ASSERT(!d->arch.hvm.nr_vioapics);
         return;
     }
 
-    vioapic_free(d, d->arch.hvm_domain.nr_vioapics);
+    vioapic_free(d, d->arch.hvm.nr_vioapics);
 }
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 4860651..5ddb41b 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -223,7 +223,7 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
     case 2:
         /* Hypervisor information, but only if the guest has set its
            own version number. */
-        if ( d->arch.hvm_domain.viridian.guest_os_id.raw == 0 )
+        if ( d->arch.hvm.viridian.guest_os_id.raw == 0 )
             break;
         res->a = viridian_build;
         res->b = ((uint32_t)viridian_major << 16) | viridian_minor;
@@ -268,8 +268,8 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
 
     case 4:
         /* Recommended hypercall usage. */
-        if ( (d->arch.hvm_domain.viridian.guest_os_id.raw == 0) ||
-             (d->arch.hvm_domain.viridian.guest_os_id.fields.os < 4) )
+        if ( (d->arch.hvm.viridian.guest_os_id.raw == 0) ||
+             (d->arch.hvm.viridian.guest_os_id.fields.os < 4) )
             break;
         res->a = CPUID4A_RELAX_TIMER_INT;
         if ( viridian_feature_mask(d) & HVMPV_hcall_remote_tlb_flush )
@@ -301,7 +301,7 @@ static void dump_guest_os_id(const struct domain *d)
 {
     const union viridian_guest_os_id *goi;
 
-    goi = &d->arch.hvm_domain.viridian.guest_os_id;
+    goi = &d->arch.hvm.viridian.guest_os_id;
 
     printk(XENLOG_G_INFO
            "d%d: VIRIDIAN GUEST_OS_ID: vendor: %x os: %x major: %x minor: %x sp: %x build: %x\n",
@@ -315,7 +315,7 @@ static void dump_hypercall(const struct domain *d)
 {
     const union viridian_hypercall_gpa *hg;
 
-    hg = &d->arch.hvm_domain.viridian.hypercall_gpa;
+    hg = &d->arch.hvm.viridian.hypercall_gpa;
 
     printk(XENLOG_G_INFO "d%d: VIRIDIAN HYPERCALL: enabled: %x pfn: %lx\n",
            d->domain_id,
@@ -336,7 +336,7 @@ static void dump_reference_tsc(const struct domain *d)
 {
     const union viridian_reference_tsc *rt;
 
-    rt = &d->arch.hvm_domain.viridian.reference_tsc;
+    rt = &d->arch.hvm.viridian.reference_tsc;
     
     printk(XENLOG_G_INFO "d%d: VIRIDIAN REFERENCE_TSC: enabled: %x pfn: %lx\n",
            d->domain_id,
@@ -345,7 +345,7 @@ static void dump_reference_tsc(const struct domain *d)
 
 static void enable_hypercall_page(struct domain *d)
 {
-    unsigned long gmfn = d->arch.hvm_domain.viridian.hypercall_gpa.fields.pfn;
+    unsigned long gmfn = d->arch.hvm.viridian.hypercall_gpa.fields.pfn;
     struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
     uint8_t *p;
 
@@ -483,7 +483,7 @@ void viridian_apic_assist_clear(struct vcpu *v)
 
 static void update_reference_tsc(struct domain *d, bool_t initialize)
 {
-    unsigned long gmfn = d->arch.hvm_domain.viridian.reference_tsc.fields.pfn;
+    unsigned long gmfn = d->arch.hvm.viridian.reference_tsc.fields.pfn;
     struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
     HV_REFERENCE_TSC_PAGE *p;
 
@@ -566,15 +566,15 @@ int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
     {
     case HV_X64_MSR_GUEST_OS_ID:
         perfc_incr(mshv_wrmsr_osid);
-        d->arch.hvm_domain.viridian.guest_os_id.raw = val;
+        d->arch.hvm.viridian.guest_os_id.raw = val;
         dump_guest_os_id(d);
         break;
 
     case HV_X64_MSR_HYPERCALL:
         perfc_incr(mshv_wrmsr_hc_page);
-        d->arch.hvm_domain.viridian.hypercall_gpa.raw = val;
+        d->arch.hvm.viridian.hypercall_gpa.raw = val;
         dump_hypercall(d);
-        if ( d->arch.hvm_domain.viridian.hypercall_gpa.fields.enabled )
+        if ( d->arch.hvm.viridian.hypercall_gpa.fields.enabled )
             enable_hypercall_page(d);
         break;
 
@@ -618,9 +618,9 @@ int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
             return 0;
 
         perfc_incr(mshv_wrmsr_tsc_msr);
-        d->arch.hvm_domain.viridian.reference_tsc.raw = val;
+        d->arch.hvm.viridian.reference_tsc.raw = val;
         dump_reference_tsc(d);
-        if ( d->arch.hvm_domain.viridian.reference_tsc.fields.enabled )
+        if ( d->arch.hvm.viridian.reference_tsc.fields.enabled )
             update_reference_tsc(d, 1);
         break;
 
@@ -681,7 +681,7 @@ void viridian_time_ref_count_freeze(struct domain *d)
 {
     struct viridian_time_ref_count *trc;
 
-    trc = &d->arch.hvm_domain.viridian.time_ref_count;
+    trc = &d->arch.hvm.viridian.time_ref_count;
 
     if ( test_and_clear_bit(_TRC_running, &trc->flags) )
         trc->val = raw_trc_val(d) + trc->off;
@@ -691,7 +691,7 @@ void viridian_time_ref_count_thaw(struct domain *d)
 {
     struct viridian_time_ref_count *trc;
 
-    trc = &d->arch.hvm_domain.viridian.time_ref_count;
+    trc = &d->arch.hvm.viridian.time_ref_count;
 
     if ( !d->is_shutting_down &&
          !test_and_set_bit(_TRC_running, &trc->flags) )
@@ -710,12 +710,12 @@ int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
     {
     case HV_X64_MSR_GUEST_OS_ID:
         perfc_incr(mshv_rdmsr_osid);
-        *val = d->arch.hvm_domain.viridian.guest_os_id.raw;
+        *val = d->arch.hvm.viridian.guest_os_id.raw;
         break;
 
     case HV_X64_MSR_HYPERCALL:
         perfc_incr(mshv_rdmsr_hc_page);
-        *val = d->arch.hvm_domain.viridian.hypercall_gpa.raw;
+        *val = d->arch.hvm.viridian.hypercall_gpa.raw;
         break;
 
     case HV_X64_MSR_VP_INDEX:
@@ -760,14 +760,14 @@ int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
             return 0;
 
         perfc_incr(mshv_rdmsr_tsc_msr);
-        *val = d->arch.hvm_domain.viridian.reference_tsc.raw;
+        *val = d->arch.hvm.viridian.reference_tsc.raw;
         break;
 
     case HV_X64_MSR_TIME_REF_COUNT:
     {
         struct viridian_time_ref_count *trc;
 
-        trc = &d->arch.hvm_domain.viridian.time_ref_count;
+        trc = &d->arch.hvm.viridian.time_ref_count;
 
         if ( !(viridian_feature_mask(d) & HVMPV_time_ref_count) )
             return 0;
@@ -993,10 +993,10 @@ int viridian_hypercall(struct cpu_user_regs *regs)
 static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     struct hvm_viridian_domain_context ctxt = {
-        .time_ref_count = d->arch.hvm_domain.viridian.time_ref_count.val,
-        .hypercall_gpa  = d->arch.hvm_domain.viridian.hypercall_gpa.raw,
-        .guest_os_id    = d->arch.hvm_domain.viridian.guest_os_id.raw,
-        .reference_tsc  = d->arch.hvm_domain.viridian.reference_tsc.raw,
+        .time_ref_count = d->arch.hvm.viridian.time_ref_count.val,
+        .hypercall_gpa  = d->arch.hvm.viridian.hypercall_gpa.raw,
+        .guest_os_id    = d->arch.hvm.viridian.guest_os_id.raw,
+        .reference_tsc  = d->arch.hvm.viridian.reference_tsc.raw,
     };
 
     if ( !is_viridian_domain(d) )
@@ -1012,12 +1012,12 @@ static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
     if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
         return -EINVAL;
 
-    d->arch.hvm_domain.viridian.time_ref_count.val = ctxt.time_ref_count;
-    d->arch.hvm_domain.viridian.hypercall_gpa.raw  = ctxt.hypercall_gpa;
-    d->arch.hvm_domain.viridian.guest_os_id.raw    = ctxt.guest_os_id;
-    d->arch.hvm_domain.viridian.reference_tsc.raw  = ctxt.reference_tsc;
+    d->arch.hvm.viridian.time_ref_count.val = ctxt.time_ref_count;
+    d->arch.hvm.viridian.hypercall_gpa.raw  = ctxt.hypercall_gpa;
+    d->arch.hvm.viridian.guest_os_id.raw    = ctxt.guest_os_id;
+    d->arch.hvm.viridian.reference_tsc.raw  = ctxt.reference_tsc;
 
-    if ( d->arch.hvm_domain.viridian.reference_tsc.fields.enabled )
+    if ( d->arch.hvm.viridian.reference_tsc.fields.enabled )
         update_reference_tsc(d, 0);
 
     return 0;
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index ec089cc..04702e9 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1203,10 +1203,10 @@ int vlapic_accept_pic_intr(struct vcpu *v)
         return 0;
 
     TRACE_2D(TRC_HVM_EMUL_LAPIC_PIC_INTR,
-             (v == v->domain->arch.hvm_domain.i8259_target),
+             (v == v->domain->arch.hvm.i8259_target),
              v ? __vlapic_accept_pic_intr(v) : -1);
 
-    return ((v == v->domain->arch.hvm_domain.i8259_target) &&
+    return ((v == v->domain->arch.hvm.i8259_target) &&
             __vlapic_accept_pic_intr(v));
 }
 
@@ -1224,9 +1224,9 @@ void vlapic_adjust_i8259_target(struct domain *d)
     v = d->vcpu ? d->vcpu[0] : NULL;
 
  found:
-    if ( d->arch.hvm_domain.i8259_target == v )
+    if ( d->arch.hvm.i8259_target == v )
         return;
-    d->arch.hvm_domain.i8259_target = v;
+    d->arch.hvm.i8259_target = v;
     pt_adjust_global_vcpu_target(v);
 }
 
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index 3001d5c..ccbf181 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -173,7 +173,7 @@ static DEFINE_RCU_READ_LOCK(msixtbl_rcu_lock);
  */
 static bool msixtbl_initialised(const struct domain *d)
 {
-    return !!d->arch.hvm_domain.msixtbl_list.next;
+    return !!d->arch.hvm.msixtbl_list.next;
 }
 
 static struct msixtbl_entry *msixtbl_find_entry(
@@ -182,7 +182,7 @@ static struct msixtbl_entry *msixtbl_find_entry(
     struct msixtbl_entry *entry;
     struct domain *d = v->domain;
 
-    list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
+    list_for_each_entry( entry, &d->arch.hvm.msixtbl_list, list )
         if ( addr >= entry->gtable &&
              addr < entry->gtable + entry->table_len )
             return entry;
@@ -430,7 +430,7 @@ static void add_msixtbl_entry(struct domain *d,
     entry->pdev = pdev;
     entry->gtable = (unsigned long) gtable;
 
-    list_add_rcu(&entry->list, &d->arch.hvm_domain.msixtbl_list);
+    list_add_rcu(&entry->list, &d->arch.hvm.msixtbl_list);
 }
 
 static void free_msixtbl_entry(struct rcu_head *rcu)
@@ -483,7 +483,7 @@ int msixtbl_pt_register(struct domain *d, struct pirq *pirq, uint64_t gtable)
 
     pdev = msi_desc->dev;
 
-    list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
+    list_for_each_entry( entry, &d->arch.hvm.msixtbl_list, list )
         if ( pdev == entry->pdev )
             goto found;
 
@@ -542,7 +542,7 @@ void msixtbl_pt_unregister(struct domain *d, struct pirq *pirq)
 
     pdev = msi_desc->dev;
 
-    list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
+    list_for_each_entry( entry, &d->arch.hvm.msixtbl_list, list )
         if ( pdev == entry->pdev )
             goto found;
 
@@ -564,7 +564,7 @@ void msixtbl_init(struct domain *d)
     if ( !is_hvm_domain(d) || !has_vlapic(d) || msixtbl_initialised(d) )
         return;
 
-    INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
+    INIT_LIST_HEAD(&d->arch.hvm.msixtbl_list);
 
     handler = hvm_next_io_handler(d);
     if ( handler )
@@ -584,7 +584,7 @@ void msixtbl_pt_cleanup(struct domain *d)
     spin_lock(&d->event_lock);
 
     list_for_each_entry_safe( entry, temp,
-                              &d->arch.hvm_domain.msixtbl_list, list )
+                              &d->arch.hvm.msixtbl_list, list )
         del_msixtbl_entry(entry);
 
     spin_unlock(&d->event_lock);
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 6681032..f30850c 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1108,8 +1108,8 @@ static int construct_vmcs(struct vcpu *v)
     }
 
     /* I/O access bitmap. */
-    __vmwrite(IO_BITMAP_A, __pa(d->arch.hvm_domain.io_bitmap));
-    __vmwrite(IO_BITMAP_B, __pa(d->arch.hvm_domain.io_bitmap) + PAGE_SIZE);
+    __vmwrite(IO_BITMAP_A, __pa(d->arch.hvm.io_bitmap));
+    __vmwrite(IO_BITMAP_B, __pa(d->arch.hvm.io_bitmap) + PAGE_SIZE);
 
     if ( cpu_has_vmx_virtual_intr_delivery )
     {
@@ -1263,7 +1263,7 @@ static int construct_vmcs(struct vcpu *v)
         __vmwrite(XSS_EXIT_BITMAP, 0);
 
     if ( cpu_has_vmx_tsc_scaling )
-        __vmwrite(TSC_MULTIPLIER, d->arch.hvm_domain.tsc_scaling_ratio);
+        __vmwrite(TSC_MULTIPLIER, d->arch.hvm.tsc_scaling_ratio);
 
     /* will update HOST & GUEST_CR3 as reqd */
     paging_update_paging_modes(v);
@@ -1643,7 +1643,7 @@ void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
 
 bool_t vmx_domain_pml_enabled(const struct domain *d)
 {
-    return !!(d->arch.hvm_domain.vmx.status & VMX_DOMAIN_PML_ENABLED);
+    return !!(d->arch.hvm.vmx.status & VMX_DOMAIN_PML_ENABLED);
 }
 
 /*
@@ -1668,7 +1668,7 @@ int vmx_domain_enable_pml(struct domain *d)
         if ( (rc = vmx_vcpu_enable_pml(v)) != 0 )
             goto error;
 
-    d->arch.hvm_domain.vmx.status |= VMX_DOMAIN_PML_ENABLED;
+    d->arch.hvm.vmx.status |= VMX_DOMAIN_PML_ENABLED;
 
     return 0;
 
@@ -1697,7 +1697,7 @@ void vmx_domain_disable_pml(struct domain *d)
     for_each_vcpu ( d, v )
         vmx_vcpu_disable_pml(v);
 
-    d->arch.hvm_domain.vmx.status &= ~VMX_DOMAIN_PML_ENABLED;
+    d->arch.hvm.vmx.status &= ~VMX_DOMAIN_PML_ENABLED;
 }
 
 /*
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 73f0d52..ccfbacb 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -318,7 +318,7 @@ void vmx_pi_hooks_assign(struct domain *d)
     if ( !iommu_intpost || !is_hvm_domain(d) )
         return;
 
-    ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);
+    ASSERT(!d->arch.hvm.pi_ops.vcpu_block);
 
     /*
      * We carefully handle the timing here:
@@ -329,8 +329,8 @@ void vmx_pi_hooks_assign(struct domain *d)
      * This can make sure the PI (especially the NDST feild) is
      * in proper state when we call vmx_vcpu_block().
      */
-    d->arch.hvm_domain.pi_ops.switch_from = vmx_pi_switch_from;
-    d->arch.hvm_domain.pi_ops.switch_to = vmx_pi_switch_to;
+    d->arch.hvm.pi_ops.switch_from = vmx_pi_switch_from;
+    d->arch.hvm.pi_ops.switch_to = vmx_pi_switch_to;
 
     for_each_vcpu ( d, v )
     {
@@ -345,8 +345,8 @@ void vmx_pi_hooks_assign(struct domain *d)
                 x2apic_enabled ? dest : MASK_INSR(dest, PI_xAPIC_NDST_MASK));
     }
 
-    d->arch.hvm_domain.pi_ops.vcpu_block = vmx_vcpu_block;
-    d->arch.hvm_domain.pi_ops.do_resume = vmx_pi_do_resume;
+    d->arch.hvm.pi_ops.vcpu_block = vmx_vcpu_block;
+    d->arch.hvm.pi_ops.do_resume = vmx_pi_do_resume;
 }
 
 /* This function is called when pcidevs_lock is held */
@@ -357,7 +357,7 @@ void vmx_pi_hooks_deassign(struct domain *d)
     if ( !iommu_intpost || !is_hvm_domain(d) )
         return;
 
-    ASSERT(d->arch.hvm_domain.pi_ops.vcpu_block);
+    ASSERT(d->arch.hvm.pi_ops.vcpu_block);
 
     /*
      * Pausing the domain can make sure the vCPUs are not
@@ -369,7 +369,7 @@ void vmx_pi_hooks_deassign(struct domain *d)
     domain_pause(d);
 
     /*
-     * Note that we don't set 'd->arch.hvm_domain.pi_ops.switch_to' to NULL
+     * Note that we don't set 'd->arch.hvm.pi_ops.switch_to' to NULL
      * here. If we deassign the hooks while the vCPU is runnable in the
      * runqueue with 'SN' set, all the future notification event will be
      * suppressed since vmx_deliver_posted_intr() also use 'SN' bit
@@ -382,9 +382,9 @@ void vmx_pi_hooks_deassign(struct domain *d)
      * system, leave it here until we find a clean solution to deassign the
      * 'switch_to' hook function.
      */
-    d->arch.hvm_domain.pi_ops.vcpu_block = NULL;
-    d->arch.hvm_domain.pi_ops.switch_from = NULL;
-    d->arch.hvm_domain.pi_ops.do_resume = NULL;
+    d->arch.hvm.pi_ops.vcpu_block = NULL;
+    d->arch.hvm.pi_ops.switch_from = NULL;
+    d->arch.hvm.pi_ops.do_resume = NULL;
 
     for_each_vcpu ( d, v )
         vmx_pi_unblock_vcpu(v);
@@ -934,8 +934,8 @@ static void vmx_ctxt_switch_from(struct vcpu *v)
     vmx_restore_host_msrs();
     vmx_save_dr(v);
 
-    if ( v->domain->arch.hvm_domain.pi_ops.switch_from )
-        v->domain->arch.hvm_domain.pi_ops.switch_from(v);
+    if ( v->domain->arch.hvm.pi_ops.switch_from )
+        v->domain->arch.hvm.pi_ops.switch_from(v);
 }
 
 static void vmx_ctxt_switch_to(struct vcpu *v)
@@ -943,8 +943,8 @@ static void vmx_ctxt_switch_to(struct vcpu *v)
     vmx_restore_guest_msrs(v);
     vmx_restore_dr(v);
 
-    if ( v->domain->arch.hvm_domain.pi_ops.switch_to )
-        v->domain->arch.hvm_domain.pi_ops.switch_to(v);
+    if ( v->domain->arch.hvm.pi_ops.switch_to )
+        v->domain->arch.hvm.pi_ops.switch_to(v);
 }
 
 
@@ -1104,7 +1104,7 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
         if ( seg == x86_seg_tr ) 
         {
             const struct domain *d = v->domain;
-            uint64_t val = d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED];
+            uint64_t val = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
 
             if ( val )
             {
@@ -1115,7 +1115,7 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
                 if ( val & VM86_TSS_UPDATED )
                 {
                     hvm_prepare_vm86_tss(v, base, limit);
-                    cmpxchg(&d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED],
+                    cmpxchg(&d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED],
                             val, val & ~VM86_TSS_UPDATED);
                 }
                 v->arch.hvm_vmx.vm86_segment_mask &= ~(1u << seg);
@@ -1626,7 +1626,7 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
         {
             if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
                 v->arch.hvm_vcpu.hw_cr[3] =
-                    v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT];
+                    v->domain->arch.hvm.params[HVM_PARAM_IDENT_PT];
             vmx_load_pdptrs(v);
         }
 
@@ -2997,7 +2997,7 @@ static int vmx_alloc_vlapic_mapping(struct domain *d)
     mfn = page_to_mfn(pg);
     clear_domain_page(mfn);
     share_xen_page_with_guest(pg, d, SHARE_rw);
-    d->arch.hvm_domain.vmx.apic_access_mfn = mfn_x(mfn);
+    d->arch.hvm.vmx.apic_access_mfn = mfn_x(mfn);
     set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), mfn,
                        PAGE_ORDER_4K, p2m_get_hostp2m(d)->default_access);
 
@@ -3006,7 +3006,7 @@ static int vmx_alloc_vlapic_mapping(struct domain *d)
 
 static void vmx_free_vlapic_mapping(struct domain *d)
 {
-    unsigned long mfn = d->arch.hvm_domain.vmx.apic_access_mfn;
+    unsigned long mfn = d->arch.hvm.vmx.apic_access_mfn;
 
     if ( mfn != 0 )
         free_shared_domheap_page(mfn_to_page(_mfn(mfn)));
@@ -3016,13 +3016,13 @@ static void vmx_install_vlapic_mapping(struct vcpu *v)
 {
     paddr_t virt_page_ma, apic_page_ma;
 
-    if ( v->domain->arch.hvm_domain.vmx.apic_access_mfn == 0 )
+    if ( v->domain->arch.hvm.vmx.apic_access_mfn == 0 )
         return;
 
     ASSERT(cpu_has_vmx_virtualize_apic_accesses);
 
     virt_page_ma = page_to_maddr(vcpu_vlapic(v)->regs_page);
-    apic_page_ma = v->domain->arch.hvm_domain.vmx.apic_access_mfn;
+    apic_page_ma = v->domain->arch.hvm.vmx.apic_access_mfn;
     apic_page_ma <<= PAGE_SHIFT;
 
     vmx_vmcs_enter(v);
@@ -4330,8 +4330,8 @@ bool vmx_vmenter_helper(const struct cpu_user_regs *regs)
     if ( nestedhvm_vcpu_in_guestmode(curr) && vcpu_nestedhvm(curr).stale_np2m )
          return false;
 
-    if ( curr->domain->arch.hvm_domain.pi_ops.do_resume )
-        curr->domain->arch.hvm_domain.pi_ops.do_resume(curr);
+    if ( curr->domain->arch.hvm.pi_ops.do_resume )
+        curr->domain->arch.hvm.pi_ops.do_resume(curr);
 
     if ( !cpu_has_vmx_vpid )
         goto out;
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index cfc9544..e0500c5 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -35,7 +35,7 @@
 #include <asm/hvm/support.h>
 
 #define vpic_domain(v) (container_of((v), struct domain, \
-                        arch.hvm_domain.vpic[!vpic->is_master]))
+                        arch.hvm.vpic[!vpic->is_master]))
 #define __vpic_lock(v) &container_of((v), struct hvm_domain, \
                                         vpic[!(v)->is_master])->irq_lock
 #define vpic_lock(v)   spin_lock(__vpic_lock(v))
@@ -112,7 +112,7 @@ static void vpic_update_int_output(struct hvm_hw_vpic *vpic)
         if ( vpic->is_master )
         {
             /* Master INT line is connected in Virtual Wire Mode. */
-            struct vcpu *v = vpic_domain(vpic)->arch.hvm_domain.i8259_target;
+            struct vcpu *v = vpic_domain(vpic)->arch.hvm.i8259_target;
             if ( v != NULL )
             {
                 TRACE_1D(TRC_HVM_EMUL_PIC_KICK, irq);
@@ -334,7 +334,7 @@ static int vpic_intercept_pic_io(
         return X86EMUL_OKAY;
     }
 
-    vpic = &current->domain->arch.hvm_domain.vpic[port >> 7];
+    vpic = &current->domain->arch.hvm.vpic[port >> 7];
 
     if ( dir == IOREQ_WRITE )
         vpic_ioport_write(vpic, port, (uint8_t)*val);
@@ -352,7 +352,7 @@ static int vpic_intercept_elcr_io(
 
     BUG_ON(bytes != 1);
 
-    vpic = &current->domain->arch.hvm_domain.vpic[port & 1];
+    vpic = &current->domain->arch.hvm.vpic[port & 1];
 
     if ( dir == IOREQ_WRITE )
     {
@@ -382,7 +382,7 @@ static int vpic_save(struct domain *d, hvm_domain_context_t *h)
     /* Save the state of both PICs */
     for ( i = 0; i < 2 ; i++ )
     {
-        s = &d->arch.hvm_domain.vpic[i];
+        s = &d->arch.hvm.vpic[i];
         if ( hvm_save_entry(PIC, i, h, s) )
             return 1;
     }
@@ -401,7 +401,7 @@ static int vpic_load(struct domain *d, hvm_domain_context_t *h)
     /* Which PIC is this? */
     if ( inst > 1 )
         return -EINVAL;
-    s = &d->arch.hvm_domain.vpic[inst];
+    s = &d->arch.hvm.vpic[inst];
 
     /* Load the state */
     if ( hvm_load_entry(PIC, h, s) != 0 )
@@ -420,7 +420,7 @@ void vpic_reset(struct domain *d)
         return;
 
     /* Master PIC. */
-    vpic = &d->arch.hvm_domain.vpic[0];
+    vpic = &d->arch.hvm.vpic[0];
     memset(vpic, 0, sizeof(*vpic));
     vpic->is_master = 1;
     vpic->elcr      = 1 << 2;
@@ -446,7 +446,7 @@ void vpic_init(struct domain *d)
 
 void vpic_irq_positive_edge(struct domain *d, int irq)
 {
-    struct hvm_hw_vpic *vpic = &d->arch.hvm_domain.vpic[irq >> 3];
+    struct hvm_hw_vpic *vpic = &d->arch.hvm.vpic[irq >> 3];
     uint8_t mask = 1 << (irq & 7);
 
     ASSERT(has_vpic(d));
@@ -464,7 +464,7 @@ void vpic_irq_positive_edge(struct domain *d, int irq)
 
 void vpic_irq_negative_edge(struct domain *d, int irq)
 {
-    struct hvm_hw_vpic *vpic = &d->arch.hvm_domain.vpic[irq >> 3];
+    struct hvm_hw_vpic *vpic = &d->arch.hvm.vpic[irq >> 3];
     uint8_t mask = 1 << (irq & 7);
 
     ASSERT(has_vpic(d));
@@ -483,7 +483,7 @@ void vpic_irq_negative_edge(struct domain *d, int irq)
 int vpic_ack_pending_irq(struct vcpu *v)
 {
     int irq, vector;
-    struct hvm_hw_vpic *vpic = &v->domain->arch.hvm_domain.vpic[0];
+    struct hvm_hw_vpic *vpic = &v->domain->arch.hvm.vpic[0];
 
     ASSERT(has_vpic(v->domain));
 
diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
index 6ac4c91..7b57017 100644
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -24,11 +24,11 @@
 #include <asm/mc146818rtc.h>
 
 #define mode_is(d, name) \
-    ((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)
+    ((d)->arch.hvm.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)
 
 void hvm_init_guest_time(struct domain *d)
 {
-    struct pl_time *pl = d->arch.hvm_domain.pl_time;
+    struct pl_time *pl = d->arch.hvm.pl_time;
 
     spin_lock_init(&pl->pl_time_lock);
     pl->stime_offset = -(u64)get_s_time();
@@ -37,7 +37,7 @@ void hvm_init_guest_time(struct domain *d)
 
 uint64_t hvm_get_guest_time_fixed(const struct vcpu *v, uint64_t at_tsc)
 {
-    struct pl_time *pl = v->domain->arch.hvm_domain.pl_time;
+    struct pl_time *pl = v->domain->arch.hvm.pl_time;
     u64 now;
 
     /* Called from device models shared with PV guests. Be careful. */
@@ -88,7 +88,7 @@ static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
     gsi = hvm_isa_irq_to_gsi(isa_irq);
 
     if ( src == hvm_intsrc_pic )
-        return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
+        return (v->domain->arch.hvm.vpic[isa_irq >> 3].irq_base
                 + (isa_irq & 7));
 
     ASSERT(src == hvm_intsrc_lapic);
@@ -121,7 +121,7 @@ static int pt_irq_masked(struct periodic_time *pt)
 
     case PTSRC_isa:
     {
-        uint8_t pic_imr = v->domain->arch.hvm_domain.vpic[pt->irq >> 3].imr;
+        uint8_t pic_imr = v->domain->arch.hvm.vpic[pt->irq >> 3].imr;
 
         /* Check if the interrupt is unmasked in the PIC. */
         if ( !(pic_imr & (1 << (pt->irq & 7))) && vlapic_accept_pic_intr(v) )
@@ -363,7 +363,7 @@ int pt_update_irq(struct vcpu *v)
     case PTSRC_isa:
         hvm_isa_irq_deassert(v->domain, irq);
         if ( platform_legacy_irq(irq) && vlapic_accept_pic_intr(v) &&
-             v->domain->arch.hvm_domain.vpic[irq >> 3].int_output )
+             v->domain->arch.hvm.vpic[irq >> 3].int_output )
             hvm_isa_irq_assert(v->domain, irq, NULL);
         else
         {
@@ -514,7 +514,7 @@ void create_periodic_time(
 
     if ( !pt->one_shot )
     {
-        if ( v->domain->arch.hvm_domain.params[HVM_PARAM_VPT_ALIGN] )
+        if ( v->domain->arch.hvm.params[HVM_PARAM_VPT_ALIGN] )
         {
             pt->scheduled = align_timer(pt->scheduled, pt->period);
         }
@@ -605,7 +605,7 @@ void pt_adjust_global_vcpu_target(struct vcpu *v)
     pt_adjust_vcpu(&vpit->pt0, v);
     spin_unlock(&vpit->lock);
 
-    pl_time = v->domain->arch.hvm_domain.pl_time;
+    pl_time = v->domain->arch.hvm.pl_time;
 
     spin_lock(&pl_time->vrtc.lock);
     pt_adjust_vcpu(&pl_time->vrtc.pt, v);
@@ -640,9 +640,9 @@ void pt_may_unmask_irq(struct domain *d, struct periodic_time *vlapic_pt)
     if ( d )
     {
         pt_resume(&d->arch.vpit.pt0);
-        pt_resume(&d->arch.hvm_domain.pl_time->vrtc.pt);
+        pt_resume(&d->arch.hvm.pl_time->vrtc.pt);
         for ( i = 0; i < HPET_TIMER_NUM; i++ )
-            pt_resume(&d->arch.hvm_domain.pl_time->vhpet.pt[i]);
+            pt_resume(&d->arch.hvm.pl_time->vhpet.pt[i]);
     }
 
     if ( vlapic_pt )
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 6865c79..ec93ab6 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1293,7 +1293,7 @@ int init_domain_irq_mapping(struct domain *d)
 
     radix_tree_init(&d->arch.irq_pirq);
     if ( is_hvm_domain(d) )
-        radix_tree_init(&d->arch.hvm_domain.emuirq_pirq);
+        radix_tree_init(&d->arch.hvm.emuirq_pirq);
 
     for ( i = 1; platform_legacy_irq(i); ++i )
     {
@@ -1319,7 +1319,7 @@ void cleanup_domain_irq_mapping(struct domain *d)
 {
     radix_tree_destroy(&d->arch.irq_pirq, NULL);
     if ( is_hvm_domain(d) )
-        radix_tree_destroy(&d->arch.hvm_domain.emuirq_pirq, NULL);
+        radix_tree_destroy(&d->arch.hvm.emuirq_pirq, NULL);
 }
 
 struct pirq *alloc_pirq_struct(struct domain *d)
@@ -2490,7 +2490,7 @@ int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq)
     /* do not store emuirq mappings for pt devices */
     if ( emuirq != IRQ_PT )
     {
-        int err = radix_tree_insert(&d->arch.hvm_domain.emuirq_pirq, emuirq,
+        int err = radix_tree_insert(&d->arch.hvm.emuirq_pirq, emuirq,
                                     radix_tree_int_to_ptr(pirq));
 
         switch ( err )
@@ -2500,7 +2500,7 @@ int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq)
         case -EEXIST:
             radix_tree_replace_slot(
                 radix_tree_lookup_slot(
-                    &d->arch.hvm_domain.emuirq_pirq, emuirq),
+                    &d->arch.hvm.emuirq_pirq, emuirq),
                 radix_tree_int_to_ptr(pirq));
             break;
         default:
@@ -2542,7 +2542,7 @@ int unmap_domain_pirq_emuirq(struct domain *d, int pirq)
         pirq_cleanup_check(info, d);
     }
     if ( emuirq != IRQ_PT )
-        radix_tree_delete(&d->arch.hvm_domain.emuirq_pirq, emuirq);
+        radix_tree_delete(&d->arch.hvm.emuirq_pirq, emuirq);
 
  done:
     return ret;
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 812a840..fe10e9d 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -83,7 +83,7 @@ int hap_track_dirty_vram(struct domain *d,
 
         paging_lock(d);
 
-        dirty_vram = d->arch.hvm_domain.dirty_vram;
+        dirty_vram = d->arch.hvm.dirty_vram;
         if ( !dirty_vram )
         {
             rc = -ENOMEM;
@@ -93,7 +93,7 @@ int hap_track_dirty_vram(struct domain *d,
                 goto out;
             }
 
-            d->arch.hvm_domain.dirty_vram = dirty_vram;
+            d->arch.hvm.dirty_vram = dirty_vram;
         }
 
         if ( begin_pfn != dirty_vram->begin_pfn ||
@@ -145,7 +145,7 @@ int hap_track_dirty_vram(struct domain *d,
     {
         paging_lock(d);
 
-        dirty_vram = d->arch.hvm_domain.dirty_vram;
+        dirty_vram = d->arch.hvm.dirty_vram;
         if ( dirty_vram )
         {
             /*
@@ -155,7 +155,7 @@ int hap_track_dirty_vram(struct domain *d,
             begin_pfn = dirty_vram->begin_pfn;
             nr = dirty_vram->end_pfn - dirty_vram->begin_pfn;
             xfree(dirty_vram);
-            d->arch.hvm_domain.dirty_vram = NULL;
+            d->arch.hvm.dirty_vram = NULL;
         }
 
         paging_unlock(d);
@@ -579,8 +579,7 @@ void hap_teardown(struct domain *d, bool *preempted)
 
     d->arch.paging.mode &= ~PG_log_dirty;
 
-    xfree(d->arch.hvm_domain.dirty_vram);
-    d->arch.hvm_domain.dirty_vram = NULL;
+    XFREE(d->arch.hvm.dirty_vram);
 
 out:
     paging_unlock(d);
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index fad8a9d..fe1df83 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -150,7 +150,7 @@ static inline shr_handle_t get_next_handle(void)
 }
 
 #define mem_sharing_enabled(d) \
-    (is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
+    (is_hvm_domain(d) && (d)->arch.hvm.mem_sharing_enabled)
 
 static atomic_t nr_saved_mfns   = ATOMIC_INIT(0); 
 static atomic_t nr_shared_mfns  = ATOMIC_INIT(0);
@@ -1333,7 +1333,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
 
     /* Only HAP is supported */
     rc = -ENODEV;
-    if ( !hap_enabled(d) || !d->arch.hvm_domain.mem_sharing_enabled )
+    if ( !hap_enabled(d) || !d->arch.hvm.mem_sharing_enabled )
         goto out;
 
     switch ( mso.op )
@@ -1613,7 +1613,7 @@ int mem_sharing_domctl(struct domain *d, struct xen_domctl_mem_sharing_op *mec)
             if ( unlikely(need_iommu(d) && mec->u.enable) )
                 rc = -EXDEV;
             else
-                d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable;
+                d->arch.hvm.mem_sharing_enabled = mec->u.enable;
         }
         break;
 
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 1930a1d..afdc27d 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2881,11 +2881,11 @@ void shadow_teardown(struct domain *d, bool *preempted)
      * calls now that we've torn down the bitmap */
     d->arch.paging.mode &= ~PG_log_dirty;
 
-    if (d->arch.hvm_domain.dirty_vram) {
-        xfree(d->arch.hvm_domain.dirty_vram->sl1ma);
-        xfree(d->arch.hvm_domain.dirty_vram->dirty_bitmap);
-        xfree(d->arch.hvm_domain.dirty_vram);
-        d->arch.hvm_domain.dirty_vram = NULL;
+    if ( d->arch.hvm.dirty_vram )
+    {
+        xfree(d->arch.hvm.dirty_vram->sl1ma);
+        xfree(d->arch.hvm.dirty_vram->dirty_bitmap);
+        XFREE(d->arch.hvm.dirty_vram);
     }
 
 out:
@@ -3261,7 +3261,7 @@ int shadow_track_dirty_vram(struct domain *d,
     p2m_lock(p2m_get_hostp2m(d));
     paging_lock(d);
 
-    dirty_vram = d->arch.hvm_domain.dirty_vram;
+    dirty_vram = d->arch.hvm.dirty_vram;
 
     if ( dirty_vram && (!nr ||
              ( begin_pfn != dirty_vram->begin_pfn
@@ -3272,7 +3272,7 @@ int shadow_track_dirty_vram(struct domain *d,
         xfree(dirty_vram->sl1ma);
         xfree(dirty_vram->dirty_bitmap);
         xfree(dirty_vram);
-        dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
+        dirty_vram = d->arch.hvm.dirty_vram = NULL;
     }
 
     if ( !nr )
@@ -3299,7 +3299,7 @@ int shadow_track_dirty_vram(struct domain *d,
             goto out;
         dirty_vram->begin_pfn = begin_pfn;
         dirty_vram->end_pfn = end_pfn;
-        d->arch.hvm_domain.dirty_vram = dirty_vram;
+        d->arch.hvm.dirty_vram = dirty_vram;
 
         if ( (dirty_vram->sl1ma = xmalloc_array(paddr_t, nr)) == NULL )
             goto out_dirty_vram;
@@ -3418,7 +3418,7 @@ int shadow_track_dirty_vram(struct domain *d,
     xfree(dirty_vram->sl1ma);
 out_dirty_vram:
     xfree(dirty_vram);
-    dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
+    dirty_vram = d->arch.hvm.dirty_vram = NULL;
 
 out:
     paging_unlock(d);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 9e43533..62819eb 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -527,7 +527,7 @@ _sh_propagate(struct vcpu *v,
     guest_l1e_t guest_entry = { guest_intpte };
     shadow_l1e_t *sp = shadow_entry_ptr;
     struct domain *d = v->domain;
-    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+    struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
     gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
     u32 pass_thru_flags;
     u32 gflags, sflags;
@@ -619,7 +619,7 @@ _sh_propagate(struct vcpu *v,
         if ( !mmio_mfn &&
              (type = hvm_get_mem_pinned_cacheattr(d, target_gfn, 0)) >= 0 )
             sflags |= pat_type_2_pte_flags(type);
-        else if ( d->arch.hvm_domain.is_in_uc_mode )
+        else if ( d->arch.hvm.is_in_uc_mode )
             sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
         else
             if ( iomem_access_permitted(d, mfn_x(target_mfn), mfn_x(target_mfn)) )
@@ -1110,7 +1110,7 @@ static inline void shadow_vram_get_l1e(shadow_l1e_t new_sl1e,
     mfn_t mfn = shadow_l1e_get_mfn(new_sl1e);
     int flags = shadow_l1e_get_flags(new_sl1e);
     unsigned long gfn;
-    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+    struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
 
     if ( !dirty_vram         /* tracking disabled? */
          || !(flags & _PAGE_RW) /* read-only mapping? */
@@ -1141,7 +1141,7 @@ static inline void shadow_vram_put_l1e(shadow_l1e_t old_sl1e,
     mfn_t mfn = shadow_l1e_get_mfn(old_sl1e);
     int flags = shadow_l1e_get_flags(old_sl1e);
     unsigned long gfn;
-    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+    struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
 
     if ( !dirty_vram         /* tracking disabled? */
          || !(flags & _PAGE_RW) /* read-only mapping? */
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index 4524823..3a3c158 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -98,7 +98,7 @@ int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
     {
         /*
          * Only makes sense for vector-based callback, else HVM-IRQ logic
-         * calls back into itself and deadlocks on hvm_domain.irq_lock.
+         * calls back into itself and deadlocks on hvm.irq_lock.
          */
         if ( !is_hvm_pv_evtchn_domain(d) )
             return -EINVAL;
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index dd11815..ed133fc 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1870,7 +1870,7 @@ static int __hwdom_init io_bitmap_cb(unsigned long s, unsigned long e,
 
     ASSERT(e <= INT_MAX);
     for ( i = s; i <= e; i++ )
-        __clear_bit(i, d->arch.hvm_domain.io_bitmap);
+        __clear_bit(i, d->arch.hvm.io_bitmap);
 
     return 0;
 }
@@ -1881,7 +1881,7 @@ void __hwdom_init setup_io_bitmap(struct domain *d)
 
     if ( is_hvm_domain(d) )
     {
-        bitmap_fill(d->arch.hvm_domain.io_bitmap, 0x10000);
+        bitmap_fill(d->arch.hvm.io_bitmap, 0x10000);
         rc = rangeset_report_ranges(d->arch.ioport_caps, 0, 0x10000,
                                     io_bitmap_cb, d);
         BUG_ON(rc);
@@ -1892,9 +1892,9 @@ void __hwdom_init setup_io_bitmap(struct domain *d)
          * Access to 1 byte RTC ports also needs to be trapped in order
          * to keep consistency with PV.
          */
-        __set_bit(0xcf8, d->arch.hvm_domain.io_bitmap);
-        __set_bit(RTC_PORT(0), d->arch.hvm_domain.io_bitmap);
-        __set_bit(RTC_PORT(1), d->arch.hvm_domain.io_bitmap);
+        __set_bit(0xcf8, d->arch.hvm.io_bitmap);
+        __set_bit(RTC_PORT(0), d->arch.hvm.io_bitmap);
+        __set_bit(RTC_PORT(1), d->arch.hvm.io_bitmap);
     }
 }
 
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 69e9aaf..5922fbf 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1039,7 +1039,7 @@ static void __update_vcpu_system_time(struct vcpu *v, int force)
 
         if ( is_hvm_domain(d) )
         {
-            struct pl_time *pl = v->domain->arch.hvm_domain.pl_time;
+            struct pl_time *pl = v->domain->arch.hvm.pl_time;
 
             stime += pl->stime_offset + v->arch.hvm_vcpu.stime_offset;
             if ( stime >= 0 )
@@ -2183,7 +2183,7 @@ void tsc_set_info(struct domain *d,
     if ( is_hvm_domain(d) )
     {
         if ( hvm_tsc_scaling_supported && !d->arch.vtsc )
-            d->arch.hvm_domain.tsc_scaling_ratio =
+            d->arch.hvm.tsc_scaling_ratio =
                 hvm_get_tsc_scaling_ratio(d->arch.tsc_khz);
 
         hvm_set_rdtsc_exiting(d, d->arch.vtsc);
@@ -2197,10 +2197,10 @@ void tsc_set_info(struct domain *d,
              * call set_tsc_offset() later from hvm_vcpu_reset_state() and they
              * will sync their TSC to BSP's sync_tsc.
              */
-            d->arch.hvm_domain.sync_tsc = rdtsc();
+            d->arch.hvm.sync_tsc = rdtsc();
             hvm_set_tsc_offset(d->vcpu[0],
                                d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset,
-                               d->arch.hvm_domain.sync_tsc);
+                               d->arch.hvm.sync_tsc);
         }
     }
 
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 144ab81..4793aac 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -48,7 +48,7 @@ static int vm_event_enable(
     xen_event_channel_notification_t notification_fn)
 {
     int rc;
-    unsigned long ring_gfn = d->arch.hvm_domain.params[param];
+    unsigned long ring_gfn = d->arch.hvm.params[param];
 
     if ( !*ved )
         *ved = xzalloc(struct vm_event_domain);
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index d1adffa..2644048 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1417,7 +1417,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
     /* Prevent device assign if mem paging or mem sharing have been 
      * enabled for this domain */
     if ( unlikely(!need_iommu(d) &&
-            (d->arch.hvm_domain.mem_sharing_enabled ||
+            (d->arch.hvm.mem_sharing_enabled ||
              vm_event_check_ring(d->vm_event_paging) ||
              p2m_get_hostp2m(d)->global_logdirty)) )
         return -EXDEV;
diff --git a/xen/drivers/vpci/msix.c b/xen/drivers/vpci/msix.c
index bcf6325..1960dae 100644
--- a/xen/drivers/vpci/msix.c
+++ b/xen/drivers/vpci/msix.c
@@ -152,7 +152,7 @@ static struct vpci_msix *msix_find(const struct domain *d, unsigned long addr)
 {
     struct vpci_msix *msix;
 
-    list_for_each_entry ( msix, &d->arch.hvm_domain.msix_tables, next )
+    list_for_each_entry ( msix, &d->arch.hvm.msix_tables, next )
     {
         const struct vpci_bar *bars = msix->pdev->vpci->header.bars;
         unsigned int i;
@@ -438,10 +438,10 @@ static int init_msix(struct pci_dev *pdev)
     if ( rc )
         return rc;
 
-    if ( list_empty(&d->arch.hvm_domain.msix_tables) )
+    if ( list_empty(&d->arch.hvm.msix_tables) )
         register_mmio_handler(d, &vpci_msix_table_ops);
 
-    list_add(&pdev->vpci->msix->next, &d->arch.hvm_domain.msix_tables);
+    list_add(&pdev->vpci->msix->next, &d->arch.hvm.msix_tables);
 
     return 0;
 }
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 280c395..d682307 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -51,7 +51,7 @@ struct arch_domain
     /* Virtual MMU */
     struct p2m_domain p2m;
 
-    struct hvm_domain hvm_domain;
+    struct hvm_domain hvm;
 
     struct vmmio vmmio;
 
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index fdd6856..4722c2d 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -17,7 +17,7 @@
 #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
 
 #define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
-        (d)->arch.hvm_domain.irq->callback_via_type == HVMIRQ_callback_vector)
+        (d)->arch.hvm.irq->callback_via_type == HVMIRQ_callback_vector)
 #define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
 #define is_domain_direct_mapped(d) ((void)(d), 0)
 
@@ -306,7 +306,7 @@ struct arch_domain
 
     union {
         struct pv_domain pv;
-        struct hvm_domain hvm_domain;
+        struct hvm_domain hvm;
     };
 
     struct paging_domain paging;
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 5885950..acf8e03 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -202,7 +202,7 @@ struct hvm_domain {
     };
 };
 
-#define hap_enabled(d)  ((d)->arch.hvm_domain.hap_enabled)
+#define hap_enabled(d)  ((d)->arch.hvm.hap_enabled)
 
 #endif /* __ASM_X86_HVM_DOMAIN_H__ */
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 5ea507b..ac0f035 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -266,7 +266,7 @@ u64 hvm_get_guest_tsc_fixed(struct vcpu *v, u64 at_tsc);
     (1ULL << hvm_funcs.tsc_scaling.ratio_frac_bits)
 
 #define hvm_tsc_scaling_ratio(d) \
-    ((d)->arch.hvm_domain.tsc_scaling_ratio)
+    ((d)->arch.hvm.tsc_scaling_ratio)
 
 u64 hvm_scale_tsc(const struct domain *d, u64 tsc);
 u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz);
@@ -391,10 +391,10 @@ static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
 bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);
 
 #define has_hvm_params(d) \
-    ((d)->arch.hvm_domain.params != NULL)
+    ((d)->arch.hvm.params != NULL)
 
 #define viridian_feature_mask(d) \
-    (has_hvm_params(d) ? (d)->arch.hvm_domain.params[HVM_PARAM_VIRIDIAN] : 0)
+    (has_hvm_params(d) ? (d)->arch.hvm.params[HVM_PARAM_VIRIDIAN] : 0)
 
 #define is_viridian_domain(d) \
     (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
@@ -670,9 +670,8 @@ unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore);
 #define arch_vcpu_block(v) ({                                   \
     struct vcpu *v_ = (v);                                      \
     struct domain *d_ = v_->domain;                             \
-    if ( is_hvm_domain(d_) &&                               \
-         (d_->arch.hvm_domain.pi_ops.vcpu_block) )          \
-        d_->arch.hvm_domain.pi_ops.vcpu_block(v_);          \
+    if ( is_hvm_domain(d_) && d_->arch.hvm.pi_ops.vcpu_block )  \
+        d_->arch.hvm.pi_ops.vcpu_block(v_);                     \
 })
 
 #endif /* __ASM_X86_HVM_HVM_H__ */
diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h
index 8a43cb9..2e6fa70 100644
--- a/xen/include/asm-x86/hvm/irq.h
+++ b/xen/include/asm-x86/hvm/irq.h
@@ -97,7 +97,7 @@ struct hvm_irq {
     (((((dev)<<2) + ((dev)>>3) + (intx)) & 31) + 16)
 #define hvm_pci_intx_link(dev, intx) \
     (((dev) + (intx)) & 3)
-#define hvm_domain_irq(d) ((d)->arch.hvm_domain.irq)
+#define hvm_domain_irq(d) ((d)->arch.hvm.irq)
 #define hvm_irq_size(cnt) offsetof(struct hvm_irq, gsi_assert_count[cnt])
 
 #define hvm_isa_irq_to_gsi(isa_irq) ((isa_irq) ? : 2)
diff --git a/xen/include/asm-x86/hvm/nestedhvm.h b/xen/include/asm-x86/hvm/nestedhvm.h
index 3c810b7..4a041e2 100644
--- a/xen/include/asm-x86/hvm/nestedhvm.h
+++ b/xen/include/asm-x86/hvm/nestedhvm.h
@@ -35,8 +35,8 @@ enum nestedhvm_vmexits {
 /* Nested HVM on/off per domain */
 static inline bool nestedhvm_enabled(const struct domain *d)
 {
-    return is_hvm_domain(d) && d->arch.hvm_domain.params &&
-        d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM];
+    return is_hvm_domain(d) && d->arch.hvm.params &&
+        d->arch.hvm.params[HVM_PARAM_NESTEDHVM];
 }
 
 /* Nested VCPU */
diff --git a/xen/include/asm-x86/hvm/vioapic.h b/xen/include/asm-x86/hvm/vioapic.h
index 138d2c0..a72cd17 100644
--- a/xen/include/asm-x86/hvm/vioapic.h
+++ b/xen/include/asm-x86/hvm/vioapic.h
@@ -58,7 +58,7 @@ struct hvm_vioapic {
 };
 
 #define hvm_vioapic_size(cnt) offsetof(struct hvm_vioapic, redirtbl[cnt])
-#define domain_vioapic(d, i) ((d)->arch.hvm_domain.vioapic[i])
+#define domain_vioapic(d, i) ((d)->arch.hvm.vioapic[i])
 #define vioapic_domain(v) ((v)->domain)
 
 int vioapic_init(struct domain *d);
diff --git a/xen/include/asm-x86/hvm/vpt.h b/xen/include/asm-x86/hvm/vpt.h
index 61c26ed..99169dd 100644
--- a/xen/include/asm-x86/hvm/vpt.h
+++ b/xen/include/asm-x86/hvm/vpt.h
@@ -150,8 +150,8 @@ void pt_migrate(struct vcpu *v);
 
 void pt_adjust_global_vcpu_target(struct vcpu *v);
 #define pt_global_vcpu_target(d) \
-    (is_hvm_domain(d) && (d)->arch.hvm_domain.i8259_target ? \
-     (d)->arch.hvm_domain.i8259_target : \
+    (is_hvm_domain(d) && (d)->arch.hvm.i8259_target ? \
+     (d)->arch.hvm.i8259_target : \
      (d)->vcpu ? (d)->vcpu[0] : NULL)
 
 void pt_may_unmask_irq(struct domain *d, struct periodic_time *vlapic_pt);
diff --git a/xen/include/asm-x86/irq.h b/xen/include/asm-x86/irq.h
index d9dad39..054c3ab 100644
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -188,8 +188,7 @@ void cleanup_domain_irq_mapping(struct domain *);
 #define domain_pirq_to_emuirq(d, pirq) pirq_field(d, pirq,              \
     arch.hvm.emuirq, IRQ_UNBOUND)
 #define domain_emuirq_to_pirq(d, emuirq) ({                             \
-    void *__ret = radix_tree_lookup(&(d)->arch.hvm_domain.emuirq_pirq,  \
-                                    emuirq);                            \
+    void *__ret = radix_tree_lookup(&(d)->arch.hvm.emuirq_pirq, emuirq);\
     __ret ? radix_tree_ptr_to_int(__ret) : IRQ_UNBOUND;                 \
 })
 #define IRQ_UNBOUND -1
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
