
[Xen-changelog] [xen-unstable] Merge



# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1186572443 -3600
# Node ID 35337d5c83f99daaf3c23c96c980bdc3b1243f8d
# Parent  123ad31e9c3bb98685fea54a2e4d9f4cf57ac44f
# Parent  da2c7dab1a3ad37a9e28d1e5c090affc58bebc5d
Merge
---
 xen/arch/x86/hvm/hvm.c              |   88 +++++++-
 xen/arch/x86/hvm/svm/svm.c          |  392 +++++++++---------------------------
 xen/arch/x86/hvm/svm/vmcb.c         |   17 -
 xen/arch/x86/hvm/vioapic.c          |    4 
 xen/arch/x86/hvm/vmx/vmcs.c         |   12 -
 xen/arch/x86/hvm/vmx/vmx.c          |  386 +++++++++++------------------------
 xen/arch/x86/hvm/vmx/x86_32/exits.S |    2 
 xen/arch/x86/hvm/vmx/x86_64/exits.S |    2 
 xen/arch/x86/mm.c                   |    4 
 xen/arch/x86/mm/hap/guest_walk.c    |    2 
 xen/arch/x86/mm/hap/hap.c           |   43 +--
 xen/arch/x86/mm/shadow/common.c     |    4 
 xen/arch/x86/mm/shadow/multi.c      |   27 +-
 xen/arch/x86/x86_32/asm-offsets.c   |    2 
 xen/arch/x86/x86_64/asm-offsets.c   |    2 
 xen/include/asm-x86/hvm/hvm.h       |   69 +-----
 xen/include/asm-x86/hvm/support.h   |    3 
 xen/include/asm-x86/hvm/svm/asid.h  |   14 -
 xen/include/asm-x86/hvm/svm/vmcb.h  |    5 
 xen/include/asm-x86/hvm/vcpu.h      |   12 +
 xen/include/asm-x86/hvm/vmx/vmcs.h  |    6 
 xen/include/asm-x86/hvm/vmx/vmx.h   |    4 
 22 files changed, 393 insertions(+), 707 deletions(-)
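
The changes pulled in by this merge move the per-vendor control-register
and EFER state (cpu_shadow_cr0, cpu_cr2, cpu_cr3, cpu_shadow_cr4,
cpu_shadow_efer/efer) out of arch.hvm_svm and arch.hvm_vmx into the
common arch.hvm_vcpu.guest_cr[], hw_cr[] and guest_efer fields, add
common hvm_set_cr3()/hvm_set_cr4() handlers that the SVM and VMX
mov-to-CR paths now call, and replace the update_guest_cr3 hook with a
per-register update_guest_cr hook. As a minimal sketch of what the new
layout allows (not part of the changeset; guest_cr_read is a
hypothetical name), vendor-neutral code can read a guest control
register directly instead of going through the removed
get_guest_ctrl_reg hook:

    /* Hypothetical helper, for illustration only: guest CR values now
     * live in the common per-VCPU state rather than behind a hook. */
    static inline unsigned long guest_cr_read(struct vcpu *v, unsigned int cr)
    {
        ASSERT((cr == 0) || (cr == 2) || (cr == 3) || (cr == 4));
        return v->arch.hvm_vcpu.guest_cr[cr];
    }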

diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed Aug 08 12:27:23 2007 +0100
@@ -520,6 +520,87 @@ void hvm_triple_fault(void)
     domain_shutdown(v->domain, SHUTDOWN_reboot);
 }
 
+int hvm_set_cr3(unsigned long value)
+{
+    unsigned long old_base_mfn, mfn;
+    struct vcpu *v = current;
+
+    if ( paging_mode_hap(v->domain) || !hvm_paging_enabled(v) )
+    {
+        /* Nothing to do. */
+    }
+    else if ( value == v->arch.hvm_vcpu.guest_cr[3] )
+    {
+        /* Shadow-mode TLB flush. Invalidate the shadow. */
+        mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
+        if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
+            goto bad_cr3;
+    }
+    else 
+    {
+        /* Shadow-mode CR3 change. Check PDBR and then make a new shadow. */
+        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
+        mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
+        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
+            goto bad_cr3;
+
+        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
+        v->arch.guest_table = pagetable_from_pfn(mfn);
+
+        if ( old_base_mfn )
+            put_page(mfn_to_page(old_base_mfn));
+
+        HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
+    }
+
+    v->arch.hvm_vcpu.guest_cr[3] = value;
+    paging_update_cr3(v);
+    return 1;
+
+ bad_cr3:
+    gdprintk(XENLOG_ERR, "Invalid CR3\n");
+    domain_crash(v->domain);
+    return 0;
+}
+
+int hvm_set_cr4(unsigned long value)
+{
+    struct vcpu *v = current;
+    unsigned long old_cr;
+
+    if ( value & HVM_CR4_GUEST_RESERVED_BITS )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_1,
+                    "Guest attempts to set reserved bit in CR4: %lx",
+                    value);
+        goto gpf;
+    }
+
+    if ( !(value & X86_CR4_PAE) && hvm_long_mode_enabled(v) )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
+                    "EFER.LMA is set");
+        goto gpf;
+    }
+
+    old_cr = v->arch.hvm_vcpu.guest_cr[4];
+    v->arch.hvm_vcpu.guest_cr[4] = value;
+    v->arch.hvm_vcpu.hw_cr[4] = value | HVM_CR4_HOST_MASK;
+    if ( paging_mode_hap(v->domain) )
+        v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_PAE;
+    hvm_update_guest_cr(v, 4);
+  
+    /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
+    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
+        paging_update_paging_modes(v);
+
+    return 1;
+
+ gpf:
+    hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    return 0;
+}
+
 /*
  * __hvm_copy():
  *  @buf  = hypervisor buffer
@@ -668,7 +749,6 @@ static hvm_hypercall_t *hvm_hypercall32_
 static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
     HYPERCALL(memory_op),
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
-    HYPERCALL(multicall),
     HYPERCALL(xen_version),
     HYPERCALL(grant_table_op),
     HYPERCALL(event_channel_op),
@@ -811,12 +891,6 @@ int hvm_do_hypercall(struct cpu_user_reg
 
     return (this_cpu(hc_preempted) ? HVM_HCALL_preempted :
             flush ? HVM_HCALL_invalidate : HVM_HCALL_completed);
-}
-
-void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
-{
-    v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
-    hvm_funcs.update_guest_cr3(v);
 }
 
 static void hvm_latch_shinfo_size(struct domain *d)
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Aug 08 12:27:23 2007 +0100
@@ -78,7 +78,7 @@ static void svm_inject_exception(
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     if ( trap == TRAP_page_fault )
-        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_svm.cpu_cr2, error_code);
+        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
     else
         HVMTRACE_2D(INJ_EXC, v, trap, error_code);
 
@@ -97,55 +97,14 @@ static void svm_cpu_down(void)
     write_efer(read_efer() & ~EFER_SVME);
 }
 
+static int svm_lme_is_set(struct vcpu *v)
+{
 #ifdef __x86_64__
-
-static int svm_lme_is_set(struct vcpu *v)
-{
-    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
+    u64 guest_efer = v->arch.hvm_vcpu.guest_efer;
     return guest_efer & EFER_LME;
-}
-
-static int svm_long_mode_enabled(struct vcpu *v)
-{
-    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
-    return guest_efer & EFER_LMA;
-}
-
-#else /* __i386__ */
-
-static int svm_lme_is_set(struct vcpu *v)
-{ return 0; }
-static int svm_long_mode_enabled(struct vcpu *v)
-{ return 0; }
-
+#else
+    return 0;
 #endif
-
-static int svm_cr4_pae_is_set(struct vcpu *v)
-{
-    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
-    return guest_cr4 & X86_CR4_PAE;
-}
-
-static int svm_paging_enabled(struct vcpu *v)
-{
-    unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-    return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
-}
-
-static int svm_pae_enabled(struct vcpu *v)
-{
-    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
-    return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
-}
-
-static int svm_nx_enabled(struct vcpu *v)
-{
-    return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
-}
-
-static int svm_pgbit_test(struct vcpu *v)
-{
-    return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
 }
 
 static void svm_store_cpu_guest_regs(
@@ -165,10 +124,10 @@ static void svm_store_cpu_guest_regs(
     if ( crs != NULL )
     {
         /* Returning the guest's regs */
-        crs[0] = v->arch.hvm_svm.cpu_shadow_cr0;
-        crs[2] = v->arch.hvm_svm.cpu_cr2;
-        crs[3] = v->arch.hvm_svm.cpu_cr3;
-        crs[4] = v->arch.hvm_svm.cpu_shadow_cr4;
+        crs[0] = v->arch.hvm_vcpu.guest_cr[0];
+        crs[2] = v->arch.hvm_vcpu.guest_cr[2];
+        crs[3] = v->arch.hvm_vcpu.guest_cr[3];
+        crs[4] = v->arch.hvm_vcpu.guest_cr[4];
     }
 }
 
@@ -202,7 +161,8 @@ static enum handler_return long_mode_do_
         if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
         {
             /* EFER.LME transition from 0 to 1. */
-            if ( svm_paging_enabled(v) || !svm_cr4_pae_is_set(v) )
+            if ( hvm_paging_enabled(v) ||
+                 !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
             {
                 gdprintk(XENLOG_WARNING, "Trying to set LME bit when "
                          "in paging mode or PAE bit is not set\n");
@@ -212,7 +172,7 @@ static enum handler_return long_mode_do_
         else if ( !(msr_content & EFER_LME) && svm_lme_is_set(v) )
         {
             /* EFER.LME transition from 1 to 0. */
-            if ( svm_paging_enabled(v) )
+            if ( hvm_paging_enabled(v) )
             {
                 gdprintk(XENLOG_WARNING, 
                          "Trying to clear EFER.LME while paging enabled\n");
@@ -220,9 +180,9 @@ static enum handler_return long_mode_do_
             }
         }
 
-        v->arch.hvm_svm.cpu_shadow_efer = msr_content;
+        v->arch.hvm_vcpu.guest_efer = msr_content;
         vmcb->efer = msr_content | EFER_SVME;
-        if ( !svm_paging_enabled(v) )
+        if ( !hvm_paging_enabled(v) )
             vmcb->efer &= ~(EFER_LME | EFER_LMA);
 
         break;
@@ -297,10 +257,10 @@ int svm_vmcb_save(struct vcpu *v, struct
     c->rsp = vmcb->rsp;
     c->rflags = vmcb->rflags;
 
-    c->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-    c->cr2 = v->arch.hvm_svm.cpu_cr2;
-    c->cr3 = v->arch.hvm_svm.cpu_cr3;
-    c->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+    c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
+    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
+    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
 
 #ifdef HVM_DEBUG_SUSPEND
     printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
@@ -383,10 +343,10 @@ int svm_vmcb_restore(struct vcpu *v, str
     vmcb->rsp    = c->rsp;
     vmcb->rflags = c->rflags;
 
-    v->arch.hvm_svm.cpu_shadow_cr0 = c->cr0;
+    v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
     vmcb->cr0 = c->cr0 | X86_CR0_WP | X86_CR0_ET | X86_CR0_PG;
 
-    v->arch.hvm_svm.cpu_cr2 = c->cr2;
+    v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
 
 #ifdef HVM_DEBUG_SUSPEND
     printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
@@ -396,13 +356,13 @@ int svm_vmcb_restore(struct vcpu *v, str
             c->cr4);
 #endif
 
-    if ( !svm_paging_enabled(v) ) 
+    if ( !hvm_paging_enabled(v) ) 
     {
         printk("%s: paging not enabled.\n", __func__);
         goto skip_cr3;
     }
 
-    if ( c->cr3 == v->arch.hvm_svm.cpu_cr3 ) 
+    if ( c->cr3 == v->arch.hvm_vcpu.guest_cr[3] ) 
     {
         /*
          * This is simple TLB flush, implying the guest has
@@ -428,12 +388,12 @@ int svm_vmcb_restore(struct vcpu *v, str
         v->arch.guest_table = pagetable_from_pfn(mfn);
         if (old_base_mfn)
              put_page(mfn_to_page(old_base_mfn));
-        v->arch.hvm_svm.cpu_cr3 = c->cr3;
+        v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
     }
 
  skip_cr3:
     vmcb->cr4 = c->cr4 | HVM_CR4_HOST_MASK;
-    v->arch.hvm_svm.cpu_shadow_cr4 = c->cr4;
+    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
     
     vmcb->idtr.limit = c->idtr_limit;
     vmcb->idtr.base  = c->idtr_base;
@@ -488,8 +448,8 @@ int svm_vmcb_restore(struct vcpu *v, str
 
     if ( paging_mode_hap(v->domain) )
     {
-        vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-        vmcb->cr4 = (v->arch.hvm_svm.cpu_shadow_cr4 |
+        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+        vmcb->cr4 = (v->arch.hvm_vcpu.guest_cr[4] |
                      (HVM_CR4_HOST_MASK & ~X86_CR4_PAE));
         vmcb->cr3 = c->cr3;
         vmcb->np_enable = 1;
@@ -521,7 +481,6 @@ int svm_vmcb_restore(struct vcpu *v, str
     }
 
     paging_update_paging_modes(v);
-    svm_asid_g_update_paging(v);
 
     return 0;
  
@@ -540,7 +499,7 @@ static void svm_save_cpu_state(struct vc
     data->msr_star         = vmcb->star;
     data->msr_cstar        = vmcb->cstar;
     data->msr_syscall_mask = vmcb->sfmask;
-    data->msr_efer         = v->arch.hvm_svm.cpu_shadow_efer;
+    data->msr_efer         = v->arch.hvm_vcpu.guest_efer;
     data->msr_flags        = -1ULL;
 
     data->tsc = hvm_get_guest_time(v);
@@ -556,7 +515,7 @@ static void svm_load_cpu_state(struct vc
     vmcb->star       = data->msr_star;
     vmcb->cstar      = data->msr_cstar;
     vmcb->sfmask     = data->msr_syscall_mask;
-    v->arch.hvm_svm.cpu_shadow_efer = data->msr_efer;
+    v->arch.hvm_vcpu.guest_efer = data->msr_efer;
     vmcb->efer       = data->msr_efer | EFER_SVME;
     /* VMCB's EFER.LME isn't set unless we're actually in long mode
      * (see long_mode_do_msr_write()) */
@@ -605,11 +564,11 @@ static int svm_guest_x86_mode(struct vcp
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    if ( unlikely(!(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PE)) )
+    if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
         return 0;
     if ( unlikely(vmcb->rflags & X86_EFLAGS_VM) )
         return 1;
-    if ( svm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
+    if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
         return 8;
     return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
 }
@@ -619,9 +578,20 @@ static void svm_update_host_cr3(struct v
     /* SVM doesn't have a HOST_CR3 equivalent to update. */
 }
 
-static void svm_update_guest_cr3(struct vcpu *v)
-{
-    v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
+static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
+{
+    switch ( cr )
+    {
+    case 3:
+        v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
+        svm_asid_inv_asid(v);
+        break;
+    case 4:
+        v->arch.hvm_svm.vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4];
+        break;
+    default:
+        BUG();
+    }
 }
 
 static void svm_flush_guest_tlbs(void)
@@ -639,24 +609,6 @@ static void svm_update_vtpr(struct vcpu 
     vmcb->vintr.fields.tpr = value & 0x0f;
 }
 
-static unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
-{
-    switch ( num )
-    {
-    case 0:
-        return v->arch.hvm_svm.cpu_shadow_cr0;
-    case 2:
-        return v->arch.hvm_svm.cpu_cr2;
-    case 3:
-        return v->arch.hvm_svm.cpu_cr3;
-    case 4:
-        return v->arch.hvm_svm.cpu_shadow_cr4;
-    default:
-        BUG();
-    }
-    return 0;                   /* dummy */
-}
-
 static void svm_sync_vmcb(struct vcpu *v)
 {
     struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
@@ -674,7 +626,7 @@ static unsigned long svm_get_segment_bas
 static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    int long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
+    int long_mode = vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v);
 
     switch ( seg )
     {
@@ -748,7 +700,7 @@ static void svm_stts(struct vcpu *v)
      * then this is not necessary: no FPU activity can occur until the guest 
      * clears CR0.TS, and we will initialise the FPU when that happens.
      */
-    if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
+    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
     {
         v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
         vmcb->cr0 |= X86_CR0_TS;
@@ -949,7 +901,7 @@ static void svm_hvm_inject_exception(
 {
     struct vcpu *v = current;
     if ( trapnr == TRAP_page_fault )
-        v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_svm.cpu_cr2 = cr2;
+        v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2] = cr2;
     svm_inject_exception(v, trapnr, (errcode != -1), errcode);
 }
 
@@ -970,17 +922,12 @@ static struct hvm_function_table svm_fun
     .load_cpu_guest_regs  = svm_load_cpu_guest_regs,
     .save_cpu_ctxt        = svm_save_vmcb_ctxt,
     .load_cpu_ctxt        = svm_load_vmcb_ctxt,
-    .paging_enabled       = svm_paging_enabled,
-    .long_mode_enabled    = svm_long_mode_enabled,
-    .pae_enabled          = svm_pae_enabled,
-    .nx_enabled           = svm_nx_enabled,
     .interrupts_enabled   = svm_interrupts_enabled,
     .guest_x86_mode       = svm_guest_x86_mode,
-    .get_guest_ctrl_reg   = svm_get_ctrl_reg,
     .get_segment_base     = svm_get_segment_base,
     .get_segment_register = svm_get_segment_register,
     .update_host_cr3      = svm_update_host_cr3,
-    .update_guest_cr3     = svm_update_guest_cr3,
+    .update_guest_cr      = svm_update_guest_cr,
     .flush_guest_tlbs     = svm_flush_guest_tlbs,
     .update_vtpr          = svm_update_vtpr,
     .stts                 = svm_stts,
@@ -1075,7 +1022,7 @@ static void svm_do_no_device_fault(struc
     setup_fpu(v);    
     vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
 
-    if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
+    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
         vmcb->cr0 &= ~X86_CR0_TS;
 }
 
@@ -1347,7 +1294,7 @@ static int svm_get_io_address(
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     /* If we're in long mode, don't check the segment presence & limit */
-    long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
+    long_mode = vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v);
 
     /* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit. 
      * l field combined with EFER_LMA says whether it's 16 or 64 bit. 
@@ -1650,7 +1597,7 @@ static int svm_set_cr0(unsigned long val
 static int svm_set_cr0(unsigned long value)
 {
     struct vcpu *v = current;
-    unsigned long mfn, old_value = v->arch.hvm_svm.cpu_shadow_cr0;
+    unsigned long mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     unsigned long old_base_mfn;
   
@@ -1687,25 +1634,25 @@ static int svm_set_cr0(unsigned long val
     {
         if ( svm_lme_is_set(v) )
         {
-            if ( !svm_cr4_pae_is_set(v) )
+            if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
             {
                 HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
                 svm_inject_exception(v, TRAP_gp_fault, 1, 0);
                 return 0;
             }
             HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode");
-            v->arch.hvm_svm.cpu_shadow_efer |= EFER_LMA;
+            v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
             vmcb->efer |= EFER_LMA | EFER_LME;
         }
 
         if ( !paging_mode_hap(v->domain) )
         {
             /* The guest CR3 must be pointing to the guest physical. */
-            mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
+            mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
             if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain))
             {
                 gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
-                         v->arch.hvm_svm.cpu_cr3, mfn);
+                         v->arch.hvm_vcpu.guest_cr[3], mfn);
                 domain_crash(v->domain);
                 return 0;
             }
@@ -1717,42 +1664,36 @@ static int svm_set_cr0(unsigned long val
                 put_page(mfn_to_page(old_base_mfn));
 
             HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
-                        v->arch.hvm_vmx.cpu_cr3, mfn);
+                        v->arch.hvm_vcpu.guest_cr[3], mfn);
         }
     }
     else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
     {
         /* When CR0.PG is cleared, LMA is cleared immediately. */
-        if ( svm_long_mode_enabled(v) )
+        if ( hvm_long_mode_enabled(v) )
         {
             vmcb->efer &= ~(EFER_LME | EFER_LMA);
-            v->arch.hvm_svm.cpu_shadow_efer &= ~EFER_LMA;
-        }
-
-        if ( !paging_mode_hap(v->domain) && v->arch.hvm_svm.cpu_cr3 )
+            v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
+        }
+
+        if ( !paging_mode_hap(v->domain) && v->arch.hvm_vcpu.guest_cr[3] )
         {
             put_page(mfn_to_page(get_mfn_from_gpfn(
-                v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)));
+                v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
             v->arch.guest_table = pagetable_null();
         }
     }
 
-    vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0 = value;
+    vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0] = value;
     if ( !paging_mode_hap(v->domain) )
         vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
 
     if ( (value ^ old_value) & X86_CR0_PG )
-    {
         paging_update_paging_modes(v);
-        svm_asid_g_update_paging(v);
-    }
 
     return 1;
 }
 
-/*
- * Read from control registers. CR0 and CR4 are read from the shadow.
- */
 static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
 {
     unsigned long value = 0;
@@ -1763,16 +1704,16 @@ static void mov_from_cr(int cr, int gp, 
     switch ( cr )
     {
     case 0:
-        value = v->arch.hvm_svm.cpu_shadow_cr0;
+        value = v->arch.hvm_vcpu.guest_cr[0];
         break;
     case 2:
         value = vmcb->cr2;
         break;
     case 3:
-        value = (unsigned long)v->arch.hvm_svm.cpu_cr3;
+        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
         break;
     case 4:
-        value = (unsigned long)v->arch.hvm_svm.cpu_shadow_cr4;
+        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[4];
         break;
     case 8:
         value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
@@ -1791,13 +1732,9 @@ static void mov_from_cr(int cr, int gp, 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx", cr, value);
 }
 
-
-/*
- * Write to control registers
- */
 static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
 {
-    unsigned long value, old_cr, old_base_mfn, mfn;
+    unsigned long value;
     struct vcpu *v = current;
     struct vlapic *vlapic = vcpu_vlapic(v);
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -1815,131 +1752,10 @@ static int mov_to_cr(int gpreg, int cr, 
         return svm_set_cr0(value);
 
     case 3:
-        if ( paging_mode_hap(v->domain) )
-        {
-            vmcb->cr3 = v->arch.hvm_svm.cpu_cr3 = value;
-            break;
-        }
-
-        /* If paging is not enabled yet, simply copy the value to CR3. */
-        if ( !svm_paging_enabled(v) )
-        {
-            v->arch.hvm_svm.cpu_cr3 = value;
-            break;
-        }
-
-        /* We make a new one if the shadow does not exist. */
-        if ( value == v->arch.hvm_svm.cpu_cr3 )
-        {
-            /* 
-             * This is simple TLB flush, implying the guest has 
-             * removed some translation or changed page attributes.
-             * We simply invalidate the shadow.
-             */
-            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
-            if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
-                goto bad_cr3;
-            paging_update_cr3(v);
-            /* signal paging update to ASID handler */
-            svm_asid_g_mov_to_cr3 (v);
-        }
-        else 
-        {
-            /*
-             * If different, make a shadow. Check if the PDBR is valid
-             * first.
-             */
-            HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
-            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
-            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
-                goto bad_cr3;
-
-            old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-            v->arch.guest_table = pagetable_from_pfn(mfn);
-
-            if ( old_base_mfn )
-                put_page(mfn_to_page(old_base_mfn));
-
-            v->arch.hvm_svm.cpu_cr3 = value;
-            update_cr3(v);
-            HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
-            /* signal paging update to ASID handler */
-            svm_asid_g_mov_to_cr3 (v);
-        }
-        break;
-
-    case 4: /* CR4 */
-        if ( value & HVM_CR4_GUEST_RESERVED_BITS )
-        {
-            HVM_DBG_LOG(DBG_LEVEL_1,
-                        "Guest attempts to set reserved bit in CR4: %lx",
-                        value);
-            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-            break;
-        }
-
-        if ( paging_mode_hap(v->domain) )
-        {
-            v->arch.hvm_svm.cpu_shadow_cr4 = value;
-            vmcb->cr4 = value | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
-            paging_update_paging_modes(v);
-            /* signal paging update to ASID handler */
-            svm_asid_g_update_paging (v);
-            break;
-        }
-
-        old_cr = v->arch.hvm_svm.cpu_shadow_cr4;
-        if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
-        {
-            if ( svm_pgbit_test(v) )
-            {
-#if CONFIG_PAGING_LEVELS >= 3
-                /* The guest is a 32-bit PAE guest. */
-                unsigned long mfn, old_base_mfn;
-                mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
-                if ( !mfn_valid(mfn) || 
-                     !get_page(mfn_to_page(mfn), v->domain) )
-                    goto bad_cr3;
-
-                /*
-                 * Now arch.guest_table points to machine physical.
-                 */
-                old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-                v->arch.guest_table = pagetable_from_pfn(mfn);
-                if ( old_base_mfn )
-                    put_page(mfn_to_page(old_base_mfn));
-                paging_update_paging_modes(v);
-                /* signal paging update to ASID handler */
-                svm_asid_g_update_paging (v);
-
-                HVM_DBG_LOG(DBG_LEVEL_VMMU, 
-                            "Update CR3 value = %lx, mfn = %lx",
-                            v->arch.hvm_svm.cpu_cr3, mfn);
-#endif
-            }
-        } 
-        else if ( !(value & X86_CR4_PAE) )
-        {
-            if ( svm_long_mode_enabled(v) )
-            {
-                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-            }
-        }
-
-        v->arch.hvm_svm.cpu_shadow_cr4 = value;
-        vmcb->cr4 = value | HVM_CR4_HOST_MASK;
-  
-        /*
-         * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
-         * all TLB entries except global entries.
-         */
-        if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
-        {
-            paging_update_paging_modes(v);
-            /* signal paging update to ASID handler */
-            svm_asid_g_update_paging (v);
-        }
-        break;
+        return hvm_set_cr3(value);
+
+    case 4:
+        return hvm_set_cr4(value);
 
     case 8:
         vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
@@ -1953,19 +1769,11 @@ static int mov_to_cr(int gpreg, int cr, 
     }
 
     return 1;
-
- bad_cr3:
-    gdprintk(XENLOG_ERR, "Invalid CR3\n");
-    domain_crash(v->domain);
-    return 0;
-}
-
-
-#define ARR_SIZE(x) (sizeof(x) / sizeof(x[0]))
-
-
-static int svm_cr_access(struct vcpu *v, unsigned int cr, unsigned int type,
-                         struct cpu_user_regs *regs)
+}
+
+static void svm_cr_access(
+    struct vcpu *v, unsigned int cr, unsigned int type,
+    struct cpu_user_regs *regs)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     int inst_len = 0;
@@ -1990,12 +1798,12 @@ static int svm_cr_access(struct vcpu *v,
     if ( type == TYPE_MOV_TO_CR )
     {
         inst_len = __get_instruction_length_from_list(
-            v, list_a, ARR_SIZE(list_a), &buffer[index], &match);
+            v, list_a, ARRAY_SIZE(list_a), &buffer[index], &match);
     }
     else /* type == TYPE_MOV_FROM_CR */
     {
         inst_len = __get_instruction_length_from_list(
-            v, list_b, ARR_SIZE(list_b), &buffer[index], &match);
+            v, list_b, ARRAY_SIZE(list_b), &buffer[index], &match);
     }
 
     ASSERT(inst_len > 0);
@@ -2008,7 +1816,8 @@ static int svm_cr_access(struct vcpu *v,
 
     HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long) vmcb->rip);
 
-    switch (match) 
+    switch ( match )
+
     {
     case INSTR_MOV2CR:
         gpreg = decode_src_reg(prefix, buffer[index+2]);
@@ -2025,18 +1834,18 @@ static int svm_cr_access(struct vcpu *v,
         setup_fpu(current);
         vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
         vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
-        v->arch.hvm_svm.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
+        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
         break;
 
     case INSTR_LMSW:
         gpreg = decode_src_reg(prefix, buffer[index+2]);
         value = get_reg(gpreg, regs, vmcb) & 0xF;
-        value = (v->arch.hvm_svm.cpu_shadow_cr0 & ~0xF) | value;
+        value = (v->arch.hvm_vcpu.guest_cr[0] & ~0xF) | value;
         result = svm_set_cr0(value);
         break;
 
     case INSTR_SMSW:
-        value = v->arch.hvm_svm.cpu_shadow_cr0 & 0xFFFF;
+        value = v->arch.hvm_vcpu.guest_cr[0] & 0xFFFF;
         modrm = buffer[index+2];
         addr_size = svm_guest_x86_mode(v);
         if ( addr_size < 2 )
@@ -2099,9 +1908,8 @@ static int svm_cr_access(struct vcpu *v,
 
     ASSERT(inst_len);
 
-    __update_guest_eip(vmcb, inst_len);
-    
-    return result;
+    if ( result )
+        __update_guest_eip(vmcb, inst_len);
 }
 
 static void svm_do_msr_access(
@@ -2129,7 +1937,7 @@ static void svm_do_msr_access(
             break;
 
         case MSR_EFER:
-            msr_content = v->arch.hvm_svm.cpu_shadow_efer;
+            msr_content = v->arch.hvm_vcpu.guest_efer;
             break;
 
         case MSR_K8_MC4_MISC: /* Threshold register */
@@ -2319,8 +2127,7 @@ void svm_handle_invlpg(const short invlp
     HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
 
     paging_invlpg(v, g_vaddr);
-    /* signal invplg to ASID handler */
-    svm_asid_g_invlpg (v, g_vaddr);
+    svm_asid_g_invlpg(v, g_vaddr);
 }
 
 
@@ -2335,29 +2142,28 @@ static int svm_reset_to_realmode(struct 
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    /* clear the vmcb and user regs */
     memset(regs, 0, sizeof(struct cpu_user_regs));
-   
-    /* VMCB State */
+
     vmcb->cr0 = X86_CR0_ET | X86_CR0_PG | X86_CR0_WP;
-    v->arch.hvm_svm.cpu_shadow_cr0 = X86_CR0_ET;
+    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
 
     vmcb->cr2 = 0;
     vmcb->efer = EFER_SVME;
 
     vmcb->cr4 = HVM_CR4_HOST_MASK;
-    v->arch.hvm_svm.cpu_shadow_cr4 = 0;
-
-    if ( paging_mode_hap(v->domain) ) {
-        vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 |
-                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
+    v->arch.hvm_vcpu.guest_cr[4] = 0;
+
+    if ( paging_mode_hap(v->domain) )
+    {
+        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+        vmcb->cr4 = (v->arch.hvm_vcpu.guest_cr[4] |
+                     (HVM_CR4_HOST_MASK & ~X86_CR4_PAE));
     }
 
     /* This will jump to ROMBIOS */
     vmcb->rip = 0xFFF0;
 
-    /* setup the segment registers and all their hidden states */
+    /* Set up the segment registers and all their hidden states. */
     vmcb->cs.sel = 0xF000;
     vmcb->cs.attr.bytes = 0x089b;
     vmcb->cs.limit = 0xffff;
@@ -2495,7 +2301,7 @@ asmlinkage void svm_vmexit_handler(struc
             break;
         }
 
-        v->arch.hvm_svm.cpu_cr2 = vmcb->cr2 = va;
+        v->arch.hvm_vcpu.guest_cr[2] = vmcb->cr2 = va;
         svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
         break;
     }
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Wed Aug 08 12:27:23 2007 +0100
@@ -111,7 +111,7 @@ static int construct_vmcb(struct vcpu *v
     svm_segment_attributes_t attrib;
 
     /* TLB control, and ASID assignment. */
-    svm_asid_init_vcpu (v);
+    svm_asid_init_vcpu(v);
 
     vmcb->general1_intercepts = 
         GENERAL1_INTERCEPT_INTR         | GENERAL1_INTERCEPT_NMI         |
@@ -218,25 +218,24 @@ static int construct_vmcb(struct vcpu *v
 
     /* Guest CR0. */
     vmcb->cr0 = read_cr0();
-    arch_svm->cpu_shadow_cr0 = vmcb->cr0 & ~(X86_CR0_PG | X86_CR0_TS);
-    vmcb->cr0 |= X86_CR0_WP;
+    v->arch.hvm_vcpu.guest_cr[0] = vmcb->cr0 & ~(X86_CR0_PG | X86_CR0_TS);
 
     /* Guest CR4. */
-    arch_svm->cpu_shadow_cr4 =
+    v->arch.hvm_vcpu.guest_cr[4] =
         read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
-    vmcb->cr4 = arch_svm->cpu_shadow_cr4 | HVM_CR4_HOST_MASK;
+    vmcb->cr4 = v->arch.hvm_vcpu.guest_cr[4] | HVM_CR4_HOST_MASK;
 
     paging_update_paging_modes(v);
-    vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
+    vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3]; 
 
     if ( paging_mode_hap(v->domain) )
     {
-        vmcb->cr0 = arch_svm->cpu_shadow_cr0;
+        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
         vmcb->np_enable = 1; /* enable nested paging */
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
         vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
-        vmcb->cr4 = arch_svm->cpu_shadow_cr4 =
-                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
+        vmcb->cr4 = v->arch.hvm_vcpu.guest_cr[4] =
+            HVM_CR4_HOST_MASK & ~X86_CR4_PAE;
         vmcb->exception_intercepts = HVM_TRAP_MASK;
 
         /* No point in intercepting CR3/4 reads, because the hardware 
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c        Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/vioapic.c        Wed Aug 08 12:27:23 2007 +0100
@@ -43,10 +43,6 @@
 /* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
 #define IRQ0_SPECIAL_ROUTING 1
 
-#if defined(__ia64__)
-#define opt_hvm_debug_level opt_vmx_debug_level
-#endif
-
 static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq);
 
 static unsigned long vioapic_read_indirect(struct hvm_hw_vioapic *vioapic,
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Wed Aug 08 12:27:23 2007 +0100
@@ -506,17 +506,17 @@ static void construct_vmcs(struct vcpu *
 
     /* Guest CR0. */
     cr0 = read_cr0();
-    v->arch.hvm_vmx.cpu_cr0 = cr0;
-    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
-    v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
-    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+    v->arch.hvm_vcpu.hw_cr[0] = cr0;
+    __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+    v->arch.hvm_vcpu.guest_cr[0] = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
+    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
 
     /* Guest CR4. */
     cr4 = read_cr4();
     __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
-    v->arch.hvm_vmx.cpu_shadow_cr4 =
+    v->arch.hvm_vcpu.guest_cr[4] =
         cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
-    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
 
     if ( cpu_has_vmx_tpr_shadow )
     {
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Aug 08 12:27:23 2007 +0100
@@ -100,39 +100,11 @@ static void vmx_vcpu_destroy(struct vcpu
     vmx_destroy_vmcs(v);
 }
 
-static int vmx_paging_enabled(struct vcpu *v)
-{
-    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
-    return (cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
-}
-
-static int vmx_pgbit_test(struct vcpu *v)
-{
-    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
-    return cr0 & X86_CR0_PG;
-}
-
-static int vmx_pae_enabled(struct vcpu *v)
-{
-    unsigned long cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
-    return vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE);
-}
-
-static int vmx_nx_enabled(struct vcpu *v)
-{
-    return v->arch.hvm_vmx.efer & EFER_NX;
-}
-
 #ifdef __x86_64__
 
 static int vmx_lme_is_set(struct vcpu *v)
 {
-    return v->arch.hvm_vmx.efer & EFER_LME;
-}
-
-static int vmx_long_mode_enabled(struct vcpu *v)
-{
-    return v->arch.hvm_vmx.efer & EFER_LMA;
+    return v->arch.hvm_vcpu.guest_efer & EFER_LME;
 }
 
 static void vmx_enable_long_mode(struct vcpu *v)
@@ -143,7 +115,7 @@ static void vmx_enable_long_mode(struct 
     vm_entry_value |= VM_ENTRY_IA32E_MODE;
     __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
 
-    v->arch.hvm_vmx.efer |= EFER_LMA;
+    v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
 }
 
 static void vmx_disable_long_mode(struct vcpu *v)
@@ -154,7 +126,7 @@ static void vmx_disable_long_mode(struct
     vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
     __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
 
-    v->arch.hvm_vmx.efer &= ~EFER_LMA;
+    v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
 }
 
 static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
@@ -190,7 +162,7 @@ static enum handler_return long_mode_do_
     switch ( ecx )
     {
     case MSR_EFER:
-        msr_content = v->arch.hvm_vmx.efer;
+        msr_content = v->arch.hvm_vcpu.guest_efer;
         break;
 
     case MSR_FS_BASE:
@@ -204,7 +176,7 @@ static enum handler_return long_mode_do_
     case MSR_SHADOW_GS_BASE:
         msr_content = v->arch.hvm_vmx.shadow_gs;
     check_long_mode:
-        if ( !(vmx_long_mode_enabled(v)) )
+        if ( !(hvm_long_mode_enabled(v)) )
         {
             vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
             return HNDL_exception_raised;
@@ -263,9 +235,9 @@ static enum handler_return long_mode_do_
         }
 
         if ( (msr_content & EFER_LME)
-             &&  !(v->arch.hvm_vmx.efer & EFER_LME) )
-        {
-            if ( unlikely(vmx_paging_enabled(v)) )
+             &&  !(v->arch.hvm_vcpu.guest_efer & EFER_LME) )
+        {
+            if ( unlikely(hvm_paging_enabled(v)) )
             {
                 gdprintk(XENLOG_WARNING,
                          "Trying to set EFER.LME with paging enabled\n");
@@ -273,9 +245,9 @@ static enum handler_return long_mode_do_
             }
         }
         else if ( !(msr_content & EFER_LME)
-                  && (v->arch.hvm_vmx.efer & EFER_LME) )
-        {
-            if ( unlikely(vmx_paging_enabled(v)) )
+                  && (v->arch.hvm_vcpu.guest_efer & EFER_LME) )
+        {
+            if ( unlikely(hvm_paging_enabled(v)) )
             {
                 gdprintk(XENLOG_WARNING,
                          "Trying to clear EFER.LME with paging enabled\n");
@@ -283,17 +255,17 @@ static enum handler_return long_mode_do_
             }
         }
 
-        if ( (msr_content ^ v->arch.hvm_vmx.efer) & (EFER_NX|EFER_SCE) )
+        if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & (EFER_NX|EFER_SCE) )
             write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
                        (msr_content & (EFER_NX|EFER_SCE)));
 
-        v->arch.hvm_vmx.efer = msr_content;
+        v->arch.hvm_vcpu.guest_efer = msr_content;
         break;
 
     case MSR_FS_BASE:
     case MSR_GS_BASE:
     case MSR_SHADOW_GS_BASE:
-        if ( !vmx_long_mode_enabled(v) )
+        if ( !hvm_long_mode_enabled(v) )
             goto gp_fault;
 
         if ( !is_canonical_address(msr_content) )
@@ -394,21 +366,19 @@ static void vmx_restore_guest_msrs(struc
         clear_bit(i, &guest_flags);
     }
 
-    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
+    if ( (v->arch.hvm_vcpu.guest_efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
     {
         HVM_DBG_LOG(DBG_LEVEL_2,
                     "restore guest's EFER with value %lx",
-                    v->arch.hvm_vmx.efer);
+                    v->arch.hvm_vcpu.guest_efer);
         write_efer((read_efer() & ~(EFER_NX | EFER_SCE)) |
-                   (v->arch.hvm_vmx.efer & (EFER_NX | EFER_SCE)));
+                   (v->arch.hvm_vcpu.guest_efer & (EFER_NX | EFER_SCE)));
     }
 }
 
 #else  /* __i386__ */
 
 static int vmx_lme_is_set(struct vcpu *v)
-{ return 0; }
-static int vmx_long_mode_enabled(struct vcpu *v)
 { return 0; }
 static void vmx_enable_long_mode(struct vcpu *v)
 { BUG(); }
@@ -427,13 +397,13 @@ static void vmx_restore_host_msrs(void)
 
 static void vmx_restore_guest_msrs(struct vcpu *v)
 {
-    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & EFER_NX )
+    if ( (v->arch.hvm_vcpu.guest_efer ^ read_efer()) & EFER_NX )
     {
         HVM_DBG_LOG(DBG_LEVEL_2,
                     "restore guest's EFER with value %lx",
-                    v->arch.hvm_vmx.efer);
+                    v->arch.hvm_vcpu.guest_efer);
         write_efer((read_efer() & ~EFER_NX) |
-                   (v->arch.hvm_vmx.efer & EFER_NX));
+                   (v->arch.hvm_vcpu.guest_efer & EFER_NX));
     }
 }
 
@@ -444,7 +414,7 @@ static enum handler_return long_mode_do_
 
     switch ( regs->ecx ) {
     case MSR_EFER:
-        msr_content = v->arch.hvm_vmx.efer;
+        msr_content = v->arch.hvm_vcpu.guest_efer;
         break;
 
     default:
@@ -475,10 +445,10 @@ static enum handler_return long_mode_do_
             return HNDL_exception_raised;
         }
 
-        if ( (msr_content ^ v->arch.hvm_vmx.efer) & EFER_NX )
+        if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & EFER_NX )
             write_efer((read_efer() & ~EFER_NX) | (msr_content & EFER_NX));
 
-        v->arch.hvm_vmx.efer = msr_content;
+        v->arch.hvm_vcpu.guest_efer = msr_content;
         break;
 
     default:
@@ -501,12 +471,12 @@ static int vmx_guest_x86_mode(struct vcp
 
     ASSERT(v == current);
 
-    if ( unlikely(!(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_PE)) )
+    if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
         return 0;
     if ( unlikely(__vmread(GUEST_RFLAGS) & X86_EFLAGS_VM) )
         return 1;
     cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
-    if ( vmx_long_mode_enabled(v) &&
+    if ( hvm_long_mode_enabled(v) &&
          likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
         return 8;
     return (likely(cs_ar_bytes & X86_SEG_AR_DEF_OP_SIZE) ? 4 : 2);
@@ -551,12 +521,12 @@ void vmx_vmcs_save(struct vcpu *v, struc
     c->rsp = __vmread(GUEST_RSP);
     c->rflags = __vmread(GUEST_RFLAGS);
 
-    c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
-    c->cr2 = v->arch.hvm_vmx.cpu_cr2;
-    c->cr3 = v->arch.hvm_vmx.cpu_cr3;
-    c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
-
-    c->msr_efer = v->arch.hvm_vmx.efer;
+    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+    c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
+    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
+    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
+
+    c->msr_efer = v->arch.hvm_vcpu.guest_efer;
 
 #ifdef HVM_DEBUG_SUSPEND
     printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
@@ -635,22 +605,22 @@ int vmx_vmcs_restore(struct vcpu *v, str
     __vmwrite(GUEST_RSP, c->rsp);
     __vmwrite(GUEST_RFLAGS, c->rflags);
 
-    v->arch.hvm_vmx.cpu_cr0 = (c->cr0 | X86_CR0_PE | X86_CR0_PG |
-                               X86_CR0_NE | X86_CR0_WP | X86_CR0_ET);
-    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
-    v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
-    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
-
-    v->arch.hvm_vmx.cpu_cr2 = c->cr2;
-
-    v->arch.hvm_vmx.efer = c->msr_efer;
+    v->arch.hvm_vcpu.hw_cr[0] = (c->cr0 | X86_CR0_PE | X86_CR0_PG |
+                                 X86_CR0_NE | X86_CR0_WP | X86_CR0_ET);
+    __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+    v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
+    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+
+    v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
+
+    v->arch.hvm_vcpu.guest_efer = c->msr_efer;
 
 #ifdef HVM_DEBUG_SUSPEND
     printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
            __func__, c->cr3, c->cr0, c->cr4);
 #endif
 
-    if ( !vmx_paging_enabled(v) )
+    if ( !hvm_paging_enabled(v) )
     {
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "%s: paging not enabled.", __func__);
         goto skip_cr3;
@@ -672,14 +642,14 @@ int vmx_vmcs_restore(struct vcpu *v, str
         put_page(mfn_to_page(old_base_mfn));
 
  skip_cr3:
-    v->arch.hvm_vmx.cpu_cr3 = c->cr3;
-
-    if ( vmx_long_mode_enabled(v) )
+    v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
+
+    if ( hvm_long_mode_enabled(v) )
         vmx_enable_long_mode(v);
 
     __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
-    v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
-    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
+    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
 
     __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
     __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
@@ -884,10 +854,10 @@ static void vmx_store_cpu_guest_regs(
 
     if ( crs != NULL )
     {
-        crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;
-        crs[2] = v->arch.hvm_vmx.cpu_cr2;
-        crs[3] = v->arch.hvm_vmx.cpu_cr3;
-        crs[4] = v->arch.hvm_vmx.cpu_shadow_cr4;
+        crs[0] = v->arch.hvm_vcpu.guest_cr[0];
+        crs[2] = v->arch.hvm_vcpu.guest_cr[2];
+        crs[3] = v->arch.hvm_vcpu.guest_cr[3];
+        crs[4] = v->arch.hvm_vcpu.guest_cr[4];
     }
 
     vmx_vmcs_exit(v);
@@ -928,24 +898,6 @@ static void vmx_load_cpu_guest_regs(stru
     vmx_vmcs_exit(v);
 }
 
-static unsigned long vmx_get_ctrl_reg(struct vcpu *v, unsigned int num)
-{
-    switch ( num )
-    {
-    case 0:
-        return v->arch.hvm_vmx.cpu_cr0;
-    case 2:
-        return v->arch.hvm_vmx.cpu_cr2;
-    case 3:
-        return v->arch.hvm_vmx.cpu_cr3;
-    case 4:
-        return v->arch.hvm_vmx.cpu_shadow_cr4;
-    default:
-        BUG();
-    }
-    return 0;                   /* dummy */
-}
-
 static unsigned long vmx_get_segment_base(struct vcpu *v, enum x86_segment seg)
 {
     unsigned long base = 0;
@@ -953,7 +905,7 @@ static unsigned long vmx_get_segment_bas
 
     ASSERT(v == current);
 
-    if ( vmx_long_mode_enabled(v) &&
+    if ( hvm_long_mode_enabled(v) &&
          (__vmread(GUEST_CS_AR_BYTES) & X86_SEG_AR_CS_LM_ACTIVE) )
         long_mode = 1;
 
@@ -1059,10 +1011,10 @@ static void vmx_stts(struct vcpu *v)
      * then this is not necessary: no FPU activity can occur until the guest
      * clears CR0.TS, and we will initialise the FPU when that happens.
      */
-    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
-    {
-        v->arch.hvm_vmx.cpu_cr0 |= X86_CR0_TS;
-        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+    {
+        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
+        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
         __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
     }
 }
@@ -1135,11 +1087,25 @@ static void vmx_update_host_cr3(struct v
     vmx_vmcs_exit(v);
 }
 
-static void vmx_update_guest_cr3(struct vcpu *v)
+static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
 {
     ASSERT((v == current) || !vcpu_runnable(v));
+
     vmx_vmcs_enter(v);
-    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
+
+    switch ( cr )
+    {
+    case 3:
+        __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
+        break;
+    case 4:
+        __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
+        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
+        break;
+    default:
+        BUG();
+    }
+
     vmx_vmcs_exit(v);
 }
 
@@ -1156,7 +1122,7 @@ static void vmx_inject_exception(
     struct vcpu *v = current;
     vmx_inject_hw_exception(v, trapnr, errcode);
     if ( trapnr == TRAP_page_fault )
-        v->arch.hvm_vmx.cpu_cr2 = cr2;
+        v->arch.hvm_vcpu.guest_cr[2] = cr2;
 }
 
 static void vmx_update_vtpr(struct vcpu *v, unsigned long value)
@@ -1200,17 +1166,12 @@ static struct hvm_function_table vmx_fun
     .load_cpu_guest_regs  = vmx_load_cpu_guest_regs,
     .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
     .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
-    .paging_enabled       = vmx_paging_enabled,
-    .long_mode_enabled    = vmx_long_mode_enabled,
-    .pae_enabled          = vmx_pae_enabled,
-    .nx_enabled           = vmx_nx_enabled,
     .interrupts_enabled   = vmx_interrupts_enabled,
     .guest_x86_mode       = vmx_guest_x86_mode,
-    .get_guest_ctrl_reg   = vmx_get_ctrl_reg,
     .get_segment_base     = vmx_get_segment_base,
     .get_segment_register = vmx_get_segment_register,
     .update_host_cr3      = vmx_update_host_cr3,
-    .update_guest_cr3     = vmx_update_guest_cr3,
+    .update_guest_cr      = vmx_update_guest_cr,
     .flush_guest_tlbs     = vmx_flush_guest_tlbs,
     .update_vtpr          = vmx_update_vtpr,
     .stts                 = vmx_stts,
@@ -1315,10 +1276,10 @@ static void vmx_do_no_device_fault(void)
     __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
 
     /* Disable TS in guest CR0 unless the guest wants the exception too. */
-    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
-    {
-        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS;
-        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+    {
+        v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
+        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
     }
 }
 
@@ -1773,7 +1734,7 @@ static void vmx_do_str_pio(unsigned long
 
     sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
     ar_bytes = __vmread(GUEST_CS_AR_BYTES);
-    if ( vmx_long_mode_enabled(current) &&
+    if ( hvm_long_mode_enabled(current) &&
          (ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
         long_mode = 1;
     addr = __vmread(GUEST_LINEAR_ADDRESS);
@@ -1900,9 +1861,9 @@ static void vmx_world_save(struct vcpu *
     c->esp = __vmread(GUEST_RSP);
     c->eflags = __vmread(GUEST_RFLAGS) & ~X86_EFLAGS_RF;
 
-    c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
-    c->cr3 = v->arch.hvm_vmx.cpu_cr3;
-    c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
+    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
+    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
 
     c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
     c->idtr_base = __vmread(GUEST_IDTR_BASE);
@@ -1959,13 +1920,13 @@ static int vmx_world_restore(struct vcpu
     __vmwrite(GUEST_RSP, c->esp);
     __vmwrite(GUEST_RFLAGS, c->eflags);
 
-    v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
-    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
-
-    if ( !vmx_paging_enabled(v) )
+    v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
+    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+
+    if ( !hvm_paging_enabled(v) )
         goto skip_cr3;
 
-    if ( c->cr3 == v->arch.hvm_vmx.cpu_cr3 )
+    if ( c->cr3 == v->arch.hvm_vcpu.guest_cr[3] )
     {
         /*
          * This is simple TLB flush, implying the guest has
@@ -1990,18 +1951,18 @@ static int vmx_world_restore(struct vcpu
         v->arch.guest_table = pagetable_from_pfn(mfn);
         if ( old_base_mfn )
              put_page(mfn_to_page(old_base_mfn));
-        v->arch.hvm_vmx.cpu_cr3 = c->cr3;
+        v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
     }
 
  skip_cr3:
-    if ( !vmx_paging_enabled(v) )
+    if ( !hvm_paging_enabled(v) )
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
     else
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
 
     __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
-    v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
-    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
+    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
 
     __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
     __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
@@ -2184,22 +2145,22 @@ static int vmx_set_cr0(unsigned long val
         __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
     }
 
-    old_cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
+    old_cr0 = v->arch.hvm_vcpu.guest_cr[0];
     paging_enabled = old_cr0 & X86_CR0_PG;
 
-    v->arch.hvm_vmx.cpu_cr0 = (value | X86_CR0_PE | X86_CR0_PG
-                               | X86_CR0_NE | X86_CR0_WP);
-    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
-
-    v->arch.hvm_vmx.cpu_shadow_cr0 = value;
-    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+    v->arch.hvm_vcpu.hw_cr[0] = (value | X86_CR0_PE | X86_CR0_PG |
+                                 X86_CR0_NE | X86_CR0_WP);
+    __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+
+    v->arch.hvm_vcpu.guest_cr[0] = value;
+    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
 
     /* Trying to enable paging. */
     if ( (value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled )
     {
-        if ( vmx_lme_is_set(v) && !vmx_long_mode_enabled(v) )
-        {
-            if ( !(v->arch.hvm_vmx.cpu_shadow_cr4 & X86_CR4_PAE) )
+        if ( vmx_lme_is_set(v) && !hvm_long_mode_enabled(v) )
+        {
+            if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
             {
                 HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging "
                             "with EFER.LME set but not CR4.PAE");
@@ -2214,11 +2175,11 @@ static int vmx_set_cr0(unsigned long val
         /*
          * The guest CR3 must be pointing to the guest physical.
          */
-        mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
+        mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
         if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
         {
             gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
-                     v->arch.hvm_vmx.cpu_cr3, mfn);
+                     v->arch.hvm_vcpu.guest_cr[3], mfn);
             domain_crash(v->domain);
             return 0;
         }
@@ -2232,7 +2193,7 @@ static int vmx_set_cr0(unsigned long val
             put_page(mfn_to_page(old_base_mfn));
 
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
-                    v->arch.hvm_vmx.cpu_cr3, mfn);
+                    v->arch.hvm_vcpu.guest_cr[3], mfn);
 
         paging_update_paging_modes(v);
     }
@@ -2242,13 +2203,13 @@ static int vmx_set_cr0(unsigned long val
          paging_enabled )
     {
         /* When CR0.PG is cleared, LMA is cleared immediately. */
-        if ( vmx_long_mode_enabled(v) )
+        if ( hvm_long_mode_enabled(v) )
             vmx_disable_long_mode(v);
 
-        if ( v->arch.hvm_vmx.cpu_cr3 )
+        if ( v->arch.hvm_vcpu.guest_cr[3] )
         {
             put_page(mfn_to_page(get_mfn_from_gpfn(
-                      v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
+                      v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
             v->arch.guest_table = pagetable_null();
         }
     }
@@ -2316,12 +2277,9 @@ static int vmx_set_cr0(unsigned long val
     CASE_ ## T ## ET_REG(R15, r15)
 #endif
 
-/*
- * Write to control registers
- */
 static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
 {
-    unsigned long value, old_cr, old_base_mfn, mfn;
+    unsigned long value;
     struct vcpu *v = current;
     struct vlapic *vlapic = vcpu_vlapic(v);
 
@@ -2353,108 +2311,10 @@ static int mov_to_cr(int gp, int cr, str
         return vmx_set_cr0(value);
 
     case 3:
-        /*
-         * If paging is not enabled yet, simply copy the value to CR3.
-         */
-        if ( !vmx_paging_enabled(v) )
-        {
-            v->arch.hvm_vmx.cpu_cr3 = value;
-            break;
-        }
-
-        /*
-         * We make a new one if the shadow does not exist.
-         */
-        if ( value == v->arch.hvm_vmx.cpu_cr3 ) {
-            /*
-             * This is simple TLB flush, implying the guest has
-             * removed some translation or changed page attributes.
-             * We simply invalidate the shadow.
-             */
-            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
-            if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
-                goto bad_cr3;
-            paging_update_cr3(v);
-        } else {
-            /*
-             * If different, make a shadow. Check if the PDBR is valid
-             * first.
-             */
-            HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
-            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
-            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
-                goto bad_cr3;
-            old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-            v->arch.guest_table = pagetable_from_pfn(mfn);
-            if ( old_base_mfn )
-                put_page(mfn_to_page(old_base_mfn));
-            v->arch.hvm_vmx.cpu_cr3 = value;
-            update_cr3(v);
-            HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
-        }
-        break;
-
-    case 4: /* CR4 */
-        old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
-
-        if ( value & HVM_CR4_GUEST_RESERVED_BITS )
-        {
-            HVM_DBG_LOG(DBG_LEVEL_1,
-                        "Guest attempts to set reserved bit in CR4: %lx",
-                        value);
-            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-            return 0;
-        }
-
-        if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
-        {
-            if ( vmx_pgbit_test(v) )
-            {
-#if CONFIG_PAGING_LEVELS >= 3
-                /* The guest is a 32-bit PAE guest. */
-                unsigned long mfn, old_base_mfn;
-                mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
-                if ( !mfn_valid(mfn) ||
-                     !get_page(mfn_to_page(mfn), v->domain) )
-                    goto bad_cr3;
-
-                /*
-                 * Now arch.guest_table points to machine physical.
-                 */
-                old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-                v->arch.guest_table = pagetable_from_pfn(mfn);
-                if ( old_base_mfn )
-                    put_page(mfn_to_page(old_base_mfn));
-
-                HVM_DBG_LOG(DBG_LEVEL_VMMU,
-                            "Update CR3 value = %lx, mfn = %lx",
-                            v->arch.hvm_vmx.cpu_cr3, mfn);
-#endif
-            }
-        }
-        else if ( !(value & X86_CR4_PAE) )
-        {
-            if ( unlikely(vmx_long_mode_enabled(v)) )
-            {
-                HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
-                            "EFER.LMA is set");
-                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-                return 0;
-            }
-        }
-
-        __vmwrite(GUEST_CR4, value | HVM_CR4_HOST_MASK);
-        v->arch.hvm_vmx.cpu_shadow_cr4 = value;
-        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
-
-        /*
-         * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
-         * all TLB entries except global entries.
-         */
-        if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
-            paging_update_paging_modes(v);
-
-        break;
+        return hvm_set_cr3(value);
+
+    case 4:
+        return hvm_set_cr4(value);
 
     case 8:
         vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
@@ -2462,14 +2322,11 @@ static int mov_to_cr(int gp, int cr, str
 
     default:
         gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
-        domain_crash(v->domain);
-        return 0;
+        goto exit_and_crash;
     }
 
     return 1;
 
- bad_cr3:
-    gdprintk(XENLOG_ERR, "Invalid CR3\n");
  exit_and_crash:
     domain_crash(v->domain);
     return 0;
@@ -2487,7 +2344,7 @@ static void mov_from_cr(int cr, int gp, 
     switch ( cr )
     {
     case 3:
-        value = (unsigned long)v->arch.hvm_vmx.cpu_cr3;
+        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
         break;
     case 8:
         value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
@@ -2530,7 +2387,8 @@ static int vmx_cr_access(unsigned long e
     unsigned long value;
     struct vcpu *v = current;
 
-    switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE ) {
+    switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE )
+    {
     case TYPE_MOV_TO_CR:
         gp = exit_qualification & CONTROL_REG_ACCESS_REG;
         cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
@@ -2545,14 +2403,14 @@ static int vmx_cr_access(unsigned long e
         setup_fpu(v);
         __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
 
-        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS; /* clear TS */
-        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
-
-        v->arch.hvm_vmx.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
-        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+        v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS; /* clear TS */
+        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+
+        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
+        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
         break;
     case TYPE_LMSW:
-        value = v->arch.hvm_vmx.cpu_shadow_cr0;
+        value = v->arch.hvm_vcpu.guest_cr[0];
         value = (value & ~0xF) |
             (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
         return vmx_set_cr0(value);
@@ -2943,7 +2801,7 @@ asmlinkage void vmx_vmexit_handler(struc
                 break;
             }
 
-            v->arch.hvm_vmx.cpu_cr2 = exit_qualification;
+            v->arch.hvm_vcpu.guest_cr[2] = exit_qualification;
             vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
             break;
         case TRAP_nmi:
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/vmx/x86_32/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S       Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S       Wed Aug 08 12:27:23 2007 +0100
@@ -74,7 +74,7 @@ ENTRY(vmx_asm_do_vmentry)
         jnz  vmx_process_softirqs
 
         call vmx_intr_assist
-        movl VCPU_vmx_cr2(%ebx),%eax
+        movl VCPU_hvm_guest_cr2(%ebx),%eax
         movl %eax,%cr2
         call vmx_trace_vmentry
 
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/hvm/vmx/x86_64/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S       Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S       Wed Aug 08 12:27:23 2007 +0100
@@ -88,7 +88,7 @@ ENTRY(vmx_asm_do_vmentry)
         jnz   vmx_process_softirqs
 
         call vmx_intr_assist
-        movq VCPU_vmx_cr2(%rbx),%rax
+        movq VCPU_hvm_guest_cr2(%rbx),%rax
         movq %rax,%cr2
         call vmx_trace_vmentry
 
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/mm.c Wed Aug 08 12:27:23 2007 +0100
@@ -394,8 +394,8 @@ void write_ptbase(struct vcpu *v)
     write_cr3(v->arch.cr3);
 }
 
-/* Should be called after CR3 is updated.
- * Updates vcpu->arch.cr3 and, for HVM guests, vcpu->arch.hvm_vcpu.cpu_cr3.
+/*
+ * Should be called after CR3 is updated.
  * 
  * Uses values found in vcpu->arch.(guest_table and guest_table_user), and
  * for HVM guests, arch.monitor_table and hvm's guest CR3.
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c  Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/mm/hap/guest_walk.c  Wed Aug 08 12:27:23 2007 +0100
@@ -62,7 +62,7 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
 unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(
     struct vcpu *v, unsigned long gva)
 {
-    unsigned long gcr3 = hvm_get_guest_ctrl_reg(v, 3);
+    unsigned long gcr3 = v->arch.hvm_vcpu.guest_cr[3];
     int mode = GUEST_PAGING_LEVELS;
     int lev, index;
     paddr_t gpa = 0;
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Wed Aug 08 12:27:23 2007 +0100
@@ -603,47 +603,36 @@ static int hap_invlpg(struct vcpu *v, un
     return 0;
 }
 
-/*
- * HAP guests do not need to take any action on CR3 writes (they are still
- * intercepted, so that Xen's copy of the guest's CR3 can be kept in sync.)
- */
 static void hap_update_cr3(struct vcpu *v, int do_locking)
 {
+    v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
+    hvm_update_guest_cr(v, 3);
 }
 
 static void hap_update_paging_modes(struct vcpu *v)
 {
-    struct domain *d;
-
-    d = v->domain;
+    struct domain *d = v->domain;
+
     hap_lock(d);
 
-    /* update guest paging mode. Note that we rely on hvm functions to detect
-     * guest's paging mode. So, make sure the shadow registers (CR0, CR4, EFER)
-     * reflect guest's status correctly.
-     */
-    if ( hvm_paging_enabled(v) )
-    {
-        if ( hvm_long_mode_enabled(v) )
-            v->arch.paging.mode = &hap_paging_long_mode;
-        else if ( hvm_pae_enabled(v) )
-            v->arch.paging.mode = &hap_paging_pae_mode;
-        else
-            v->arch.paging.mode = &hap_paging_protected_mode;
-    }
-    else
-    {
-        v->arch.paging.mode = &hap_paging_real_mode;
-    }
-
-    v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
+    v->arch.paging.mode =
+        !hvm_paging_enabled(v)   ? &hap_paging_real_mode :
+        hvm_long_mode_enabled(v) ? &hap_paging_long_mode :
+        hvm_pae_enabled(v)       ? &hap_paging_pae_mode  :
+                                   &hap_paging_protected_mode;
+
+    v->arch.paging.translate_enabled = hvm_paging_enabled(v);
 
     if ( pagetable_is_null(v->arch.monitor_table) )
     {
         mfn_t mmfn = hap_make_monitor_table(v);
         v->arch.monitor_table = pagetable_from_mfn(mmfn);
         make_cr3(v, mfn_x(mmfn));
-    }
+        hvm_update_host_cr3(v);
+    }
+
+    /* CR3 is effectively updated by a mode change. Flush ASIDs, etc. */
+    hap_update_cr3(v, 0);
 
     hap_unlock(d);
 }
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Wed Aug 08 12:27:23 2007 +0100
@@ -2266,7 +2266,7 @@ static void sh_update_paging_modes(struc
         ASSERT(shadow_mode_translate(d));
         ASSERT(shadow_mode_external(d));
 
-        v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
+        v->arch.paging.translate_enabled = hvm_paging_enabled(v);
         if ( !v->arch.paging.translate_enabled )
         {
             /* Set v->arch.guest_table to use the p2m map, and choose
@@ -2347,7 +2347,7 @@ static void sh_update_paging_modes(struc
             SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d g=%u s=%u "
                           "(was g=%u s=%u)\n",
                           d->domain_id, v->vcpu_id,
-                          is_hvm_domain(d) ? !!hvm_paging_enabled(v) : 1,
+                          is_hvm_domain(d) ? hvm_paging_enabled(v) : 1,
                           v->arch.paging.mode->guest_levels,
                           v->arch.paging.mode->shadow.shadow_levels,
                           old_mode ? old_mode->guest_levels : 0,
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Aug 08 12:27:23 2007 +0100
@@ -175,7 +175,7 @@ guest_supports_superpages(struct vcpu *v
     /* The _PAGE_PSE bit must be honoured in HVM guests, whenever
      * CR4.PSE is set or the guest is in PAE or long mode */
     return (is_hvm_vcpu(v) && (GUEST_PAGING_LEVELS != 2 
-                             || (hvm_get_guest_ctrl_reg(v, 4) & X86_CR4_PSE)));
+                             || (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PSE)));
 }
 
 static inline int
@@ -3483,7 +3483,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
  * Paravirtual guests should set v->arch.guest_table (and guest_table_user,
  * if appropriate).
  * HVM guests should also make sure hvm_get_guest_cntl_reg(v, 3) works;
- * this function will call hvm_update_guest_cr3() to tell them where the 
+ * this function will call hvm_update_guest_cr(v, 3) to tell them where the 
  * shadow tables are.
  * If do_locking != 0, assume we are being called from outside the 
  * shadow code, and must take and release the shadow lock; otherwise 
@@ -3525,7 +3525,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
         // Is paging enabled on this vcpu?
         if ( paging_vcpu_mode_translate(v) )
         {
-            gfn = _gfn(paddr_to_pfn(hvm_get_guest_ctrl_reg(v, 3)));
+            gfn = _gfn(paddr_to_pfn(v->arch.hvm_vcpu.guest_cr[3]));
             gmfn = vcpu_gfn_to_mfn(v, gfn);
             ASSERT(mfn_valid(gmfn));
             ASSERT(pagetable_get_pfn(v->arch.guest_table) == mfn_x(gmfn));
@@ -3576,11 +3576,11 @@ sh_update_cr3(struct vcpu *v, int do_loc
  
      if ( shadow_mode_external(d) && paging_vcpu_mode_translate(v) ) 
          /* Paging enabled: find where in the page the l3 table is */
-         guest_idx = guest_index((void *)hvm_get_guest_ctrl_reg(v, 3));
-    else
-        /* Paging disabled or PV: l3 is at the start of a page */ 
-        guest_idx = 0; 
-     
+         guest_idx = guest_index((void *)v->arch.hvm_vcpu.guest_cr[3]);
+     else
+         /* Paging disabled or PV: l3 is at the start of a page */ 
+         guest_idx = 0; 
+
      // Ignore the low 2 bits of guest_idx -- they are really just
      // cache control.
      guest_idx &= ~3;
@@ -3718,18 +3718,21 @@ sh_update_cr3(struct vcpu *v, int do_loc
 
 
     ///
-    /// v->arch.hvm_vcpu.hw_cr3
+    /// v->arch.hvm_vcpu.hw_cr[3]
     ///
     if ( shadow_mode_external(d) )
     {
         ASSERT(is_hvm_domain(d));
 #if SHADOW_PAGING_LEVELS == 3
         /* 2-on-3 or 3-on-3: Use the PAE shadow l3 table we just fabricated */
-        hvm_update_guest_cr3(v, virt_to_maddr(&v->arch.paging.shadow.l3table));
+        v->arch.hvm_vcpu.hw_cr[3] =
+            virt_to_maddr(&v->arch.paging.shadow.l3table);
 #else
         /* 2-on-2 or 4-on-4: Just use the shadow top-level directly */
-        hvm_update_guest_cr3(v, pagetable_get_paddr(v->arch.shadow_table[0]));
-#endif
+        v->arch.hvm_vcpu.hw_cr[3] =
+            pagetable_get_paddr(v->arch.shadow_table[0]);
+#endif
+        hvm_update_guest_cr(v, 3);
     }
 
     /* Fix up the linear pagetable mappings */
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/x86_32/asm-offsets.c Wed Aug 08 12:27:23 2007 +0100
@@ -85,7 +85,7 @@ void __dummy__(void)
     BLANK();
 
     OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
-    OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
+    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
     BLANK();
 
     OFFSET(VMCB_rax, struct vmcb_struct, rax);
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/arch/x86/x86_64/asm-offsets.c Wed Aug 08 12:27:23 2007 +0100
@@ -88,7 +88,7 @@ void __dummy__(void)
     BLANK();
 
     OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
-    OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
+    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
     BLANK();
 
     OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Wed Aug 08 12:27:23 2007 +0100
@@ -95,36 +95,26 @@ struct hvm_function_table {
 
     /*
      * Examine specifics of the guest state:
-     * 1) determine whether paging is enabled,
-     * 2) determine whether long mode is enabled,
-     * 3) determine whether PAE paging is enabled,
-     * 4) determine whether NX is enabled,
-     * 5) determine whether interrupts are enabled or not,
-     * 6) determine the mode the guest is running in,
-     * 7) return the current guest control-register value
-     * 8) return the current guest segment descriptor base
-     * 9) return the current guest segment descriptor
-     */
-    int (*paging_enabled)(struct vcpu *v);
-    int (*long_mode_enabled)(struct vcpu *v);
-    int (*pae_enabled)(struct vcpu *v);
-    int (*nx_enabled)(struct vcpu *v);
+     * 1) determine whether interrupts are enabled or not
+     * 2) determine the mode the guest is running in
+     * 3) return the current guest segment descriptor base
+     * 4) return the current guest segment descriptor
+     */
     int (*interrupts_enabled)(struct vcpu *v, enum hvm_intack);
     int (*guest_x86_mode)(struct vcpu *v);
-    unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
     unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
     void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                  struct segment_register *reg);
 
     /* 
-     * Re-set the value of CR3 that Xen runs on when handling VM exits
+     * Re-set the value of CR3 that Xen runs on when handling VM exits.
      */
     void (*update_host_cr3)(struct vcpu *v);
 
     /*
-     * Called to inform HVM layer that a guest cr3 has changed
-     */
-    void (*update_guest_cr3)(struct vcpu *v);
+     * Called to inform HVM layer that a guest control register has changed.
+     */
+    void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
 
     /*
      * Called to ensure than all guest-specific mappings in a tagged TLB
@@ -189,38 +179,24 @@ void hvm_set_guest_time(struct vcpu *v, 
 void hvm_set_guest_time(struct vcpu *v, u64 gtime);
 u64 hvm_get_guest_time(struct vcpu *v);
 
-static inline int
-hvm_paging_enabled(struct vcpu *v)
-{
-    return hvm_funcs.paging_enabled(v);
-}
+#define hvm_paging_enabled(v) \
+    (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
+#define hvm_pae_enabled(v) \
+    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
+#define hvm_nx_enabled(v) \
+    (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
 
 #ifdef __x86_64__
-static inline int
-hvm_long_mode_enabled(struct vcpu *v)
-{
-    return hvm_funcs.long_mode_enabled(v);
-}
+#define hvm_long_mode_enabled(v) \
+    ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
 #else
 #define hvm_long_mode_enabled(v) (v,0)
 #endif
 
 static inline int
-hvm_pae_enabled(struct vcpu *v)
-{
-    return hvm_funcs.pae_enabled(v);
-}
-
-static inline int
 hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
 {
     return hvm_funcs.interrupts_enabled(v, type);
-}
-
-static inline int
-hvm_nx_enabled(struct vcpu *v)
-{
-    return hvm_funcs.nx_enabled(v);
 }
 
 static inline int
@@ -244,7 +220,10 @@ hvm_update_vtpr(struct vcpu *v, unsigned
     hvm_funcs.update_vtpr(v, value);
 }
 
-void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);
+static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
+{
+    hvm_funcs.update_guest_cr(v, cr);
+}
 
 static inline void 
 hvm_flush_guest_tlbs(void)
@@ -255,12 +234,6 @@ hvm_flush_guest_tlbs(void)
 
 void hvm_hypercall_page_initialise(struct domain *d,
                                    void *hypercall_page);
-
-static inline unsigned long
-hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
-{
-    return hvm_funcs.get_guest_ctrl_reg(v, num);
-}
 
 static inline unsigned long
 hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/support.h Wed Aug 08 12:27:23 2007 +0100
@@ -234,4 +234,7 @@ void hvm_hlt(unsigned long rflags);
 void hvm_hlt(unsigned long rflags);
 void hvm_triple_fault(void);
 
+int hvm_set_cr3(unsigned long value);
+int hvm_set_cr4(unsigned long value);
+
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/svm/asid.h
--- a/xen/include/asm-x86/hvm/svm/asid.h        Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/asid.h        Wed Aug 08 12:27:23 2007 +0100
@@ -32,20 +32,6 @@ void svm_asid_inv_asid(struct vcpu *v);
 void svm_asid_inv_asid(struct vcpu *v);
 void svm_asid_inc_generation(void);
 
-/*
- * ASID related, guest triggered events.
- */
-
-static inline void svm_asid_g_update_paging(struct vcpu *v)
-{
-    svm_asid_inv_asid(v);
-}
-
-static inline void svm_asid_g_mov_to_cr3(struct vcpu *v)
-{
-    svm_asid_inv_asid(v);
-}
-
 static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
 {
 #if 0
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Wed Aug 08 12:27:23 2007 +0100
@@ -440,11 +440,6 @@ struct arch_svm_struct {
     u32                *msrpm;
     int                 launch_core;
     bool_t              vmcb_in_sync;     /* VMCB sync'ed with VMSAVE? */
-    unsigned long       cpu_shadow_cr0;   /* Guest value for CR0 */
-    unsigned long       cpu_shadow_cr4;   /* Guest value for CR4 */
-    unsigned long       cpu_shadow_efer;  /* Guest value for EFER */
-    unsigned long       cpu_cr2;
-    unsigned long       cpu_cr3;
 };
 
 struct vmcb_struct *alloc_vmcb(void);
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/vcpu.h    Wed Aug 08 12:27:23 2007 +0100
@@ -29,7 +29,17 @@
 #define HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI     1
 
 struct hvm_vcpu {
-    unsigned long       hw_cr3;     /* value we give to HW to use */
+    /* Guest control-register and EFER values, just as the guest sees them. */
+    unsigned long       guest_cr[5];
+    unsigned long       guest_efer;
+
+    /*
+     * Processor-visible CR0-4 while guest executes.
+     * Only CR3 is guaranteed to be valid: all other array entries are private
+     * to the specific HVM implementation (e.g., VMX, SVM).
+     */
+    unsigned long       hw_cr[5];
+
     struct hvm_io_op    io_op;
     struct vlapic       vlapic;
     s64                 cache_tsc_offset;
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Wed Aug 08 12:27:23 2007 +0100
@@ -67,17 +67,11 @@ struct arch_vmx_struct {
     /* Cache of cpu execution control. */
     u32                  exec_control;
 
-    unsigned long        cpu_cr0; /* copy of guest CR0 */
-    unsigned long        cpu_shadow_cr0; /* copy of guest read shadow CR0 */
-    unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
-    unsigned long        cpu_cr2; /* save CR2 */
-    unsigned long        cpu_cr3;
 #ifdef __x86_64__
     struct vmx_msr_state msr_state;
     unsigned long        shadow_gs;
     unsigned long        cstar;
 #endif
-    unsigned long        efer;
 
     /* Following fields are all specific to vmxassist. */
     unsigned long        vmxassist_enabled:1;
diff -r 123ad31e9c3b -r 35337d5c83f9 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Wed Aug 08 12:26:21 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Wed Aug 08 12:27:23 2007 +0100
@@ -279,8 +279,8 @@ static inline void __vmx_inject_exceptio
 
     __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
 
-    if (trap == TRAP_page_fault)
-        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vmx.cpu_cr2, error_code);
+    if ( trap == TRAP_page_fault )
+        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
     else
         HVMTRACE_2D(INJ_EXC, v, trap, error_code);
 }

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog