
[Xen-changelog] [xen-unstable] [VMX] Remove vcpu->arch.hvm_vmx.cpu_state



# HG changeset patch
# User Tim Deegan <tim.deegan@xxxxxxxxxxxxx>
# Node ID 646a120334efd0c9b015875dce0b4e3d196b8a31
# Parent  b3cba293e61aae7594908dcd4b848022ceeeaf53
[VMX] Remove vcpu->arch.hvm_vmx.cpu_state

The flags in hvm_vmx.cpu_state were shadows of shadows of bits in
guest CR0_READ_SHADOW, CR4_READ_SHADOW and EFER, and were not being
kept in sync with the other shadows if changes were made while
running under vmxassist.  Just use the full shadows of those control
regs instead, and replace cpu_state with a single vmxassist flag.

Signed-off-by:  Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmx.c         |  117 ++++++++++++++++---------------------
 xen/include/asm-x86/hvm/vmx/vmcs.h |   15 ----
 xen/include/asm-x86/hvm/vmx/vmx.h  |   32 +++++-----
 3 files changed, 71 insertions(+), 93 deletions(-)
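
The essence of the change: the old code cached PAE/LME/LMA in separate
cpu_state bits that could drift from the real shadow registers, while the
new code computes those predicates from the shadows on demand.  A minimal
standalone sketch of the pattern (simplified types and illustrative names,
not the actual Xen structures):

    /* Standalone sketch of the "derive, don't cache" pattern.  EFER bit
     * positions are architectural; everything else is illustrative. */
    #include <stdio.h>

    #define EFER_LME (1UL << 8)   /* long mode enable */
    #define EFER_LMA (1UL << 10)  /* long mode active */

    struct vcpu_sketch {
        unsigned long efer_shadow;         /* single source of truth  */
        unsigned int  vmxassist_enabled:1; /* the one flag that stays */
    };

    /* Recomputed on demand -- cannot go stale. */
    static int long_mode_enabled(const struct vcpu_sketch *v)
    {
        return !!(v->efer_shadow & EFER_LMA);
    }

    int main(void)
    {
        struct vcpu_sketch v = { .efer_shadow = EFER_LME | EFER_LMA };
        printf("long mode enabled: %d\n", long_mode_enabled(&v));
        return 0;
    }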

diff -r b3cba293e61a -r 646a120334ef xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Oct 05 16:05:12 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Oct 05 16:21:39 2006 +0100
@@ -226,21 +226,10 @@ static inline int long_mode_do_msr_read(
     case MSR_EFER:
         HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content 0x%"PRIx64, msr_content);
         msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
-
-        /* the following code may be not needed */
-        if ( test_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state) )
-            msr_content |= EFER_LME;
-        else
-            msr_content &= ~EFER_LME;
-
-        if ( VMX_LONG_GUEST(v) )
-            msr_content |= EFER_LMA;
-        else
-            msr_content &= ~EFER_LMA;
         break;
 
     case MSR_FS_BASE:
-        if ( !(VMX_LONG_GUEST(v)) )
+        if ( !(vmx_long_mode_enabled(v)) )
             /* XXX should it be GP fault */
             domain_crash_synchronous();
 
@@ -248,7 +237,7 @@ static inline int long_mode_do_msr_read(
         break;
 
     case MSR_GS_BASE:
-        if ( !(VMX_LONG_GUEST(v)) )
+        if ( !(vmx_long_mode_enabled(v)) )
             domain_crash_synchronous();
 
         __vmread(GUEST_GS_BASE, &msr_content);
@@ -296,21 +285,25 @@ static inline int long_mode_do_msr_write
             return 0;
         }
 
-        /* LME: 0 -> 1 */
-        if ( msr_content & EFER_LME &&
-             !test_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state) )
-        {
-            if ( vmx_paging_enabled(v) ||
-                 !test_bit(VMX_CPU_STATE_PAE_ENABLED,
-                           &v->arch.hvm_vmx.cpu_state) )
+        if ( (msr_content & EFER_LME)
+             &&  !(msr->msr_items[VMX_INDEX_MSR_EFER] & EFER_LME) )
+        {
+            if ( unlikely(vmx_paging_enabled(v)) )
             {
-                printk("Trying to set LME bit when "
-                       "in paging mode or PAE bit is not set\n");
+                printk("Trying to set EFER.LME with paging enabled\n");
                 vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
                 return 0;
             }
-
-            set_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state);
+        }
+        else if ( !(msr_content & EFER_LME)
+                  && (msr->msr_items[VMX_INDEX_MSR_EFER] & EFER_LME) )
+        {
+            if ( unlikely(vmx_paging_enabled(v)) )
+            {
+                printk("Trying to clear EFER.LME with paging enabled\n");
+                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+                return 0;
+            }
         }
 
         msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
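
The rewritten check above compares the requested EFER.LME against the
currently shadowed value rather than a cached bit: architecturally,
EFER.LME may only change while CR0.PG is clear, so a toggle in either
direction with paging enabled earns #GP(0).  A standalone sketch of that
rule (function name and return contract are illustrative):

    /* Sketch of the EFER.LME write rule enforced above; names are
     * illustrative.  Returns 0 when the write must be rejected. */
    #define EFER_LME (1UL << 8)

    static int efer_write_ok(unsigned long old_efer, unsigned long new_efer,
                             int paging_enabled)
    {
        int lme_toggled = !!((old_efer ^ new_efer) & EFER_LME);
        return !(lme_toggled && paging_enabled); /* 0 => inject #GP(0) */
    }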
@@ -318,7 +311,7 @@ static inline int long_mode_do_msr_write
 
     case MSR_FS_BASE:
     case MSR_GS_BASE:
-        if ( !(VMX_LONG_GUEST(v)) )
+        if ( !(vmx_long_mode_enabled(v)) )
             domain_crash_synchronous();
 
         if ( !IS_CANO_ADDRESS(msr_content) )
@@ -336,7 +329,7 @@ static inline int long_mode_do_msr_write
         break;
 
     case MSR_SHADOW_GS_BASE:
-        if ( !(VMX_LONG_GUEST(v)) )
+        if ( !(vmx_long_mode_enabled(v)) )
             domain_crash_synchronous();
 
         v->arch.hvm_vmx.msr_content.shadow_gs = msr_content;
@@ -1307,7 +1300,6 @@ static int vmx_world_restore(struct vcpu
 
  skip_cr3:
 
-    shadow_update_paging_modes(v);
     if (!vmx_paging_enabled(v))
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
     else
@@ -1363,6 +1355,8 @@ static int vmx_world_restore(struct vcpu
     error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
     error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
     error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
+
+    shadow_update_paging_modes(v);
 
     return !error;
 }
@@ -1408,6 +1402,7 @@ static int vmx_assist(struct vcpu *v, in
                 goto error;
             if (!vmx_world_restore(v, &c))
                 goto error;
+            v->arch.hvm_vmx.vmxassist_enabled = 1;            
             return 1;
         }
         break;
@@ -1425,6 +1420,7 @@ static int vmx_assist(struct vcpu *v, in
                 goto error;
             if (!vmx_world_restore(v, &c))
                 goto error;
+            v->arch.hvm_vmx.vmxassist_enabled = 0;
             return 1;
         }
         break;
@@ -1480,26 +1476,23 @@ static int vmx_set_cr0(unsigned long val
         }
 
 #if defined(__x86_64__)
-        if ( test_bit(VMX_CPU_STATE_LME_ENABLED,
-                      &v->arch.hvm_vmx.cpu_state) &&
-             !test_bit(VMX_CPU_STATE_PAE_ENABLED,
-                       &v->arch.hvm_vmx.cpu_state) )
-        {
-            HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enabled\n");
-            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-        }
-
-        if ( test_bit(VMX_CPU_STATE_LME_ENABLED,
-                     &v->arch.hvm_vmx.cpu_state) )
-        {
-            /* Here the PAE is should be opened */
-            HVM_DBG_LOG(DBG_LEVEL_1, "Enable long mode\n");
-            set_bit(VMX_CPU_STATE_LMA_ENABLED,
-                    &v->arch.hvm_vmx.cpu_state);
-
-            __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
-            vm_entry_value |= VM_ENTRY_IA32E_MODE;
-            __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
+        if ( vmx_lme_is_set(v) )
+        {
+            if ( !(v->arch.hvm_vmx.cpu_shadow_cr4 & X86_CR4_PAE) )
+            {
+                HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging "
+                            "with EFER.LME set but not CR4.PAE\n");
+                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+            }
+            else 
+            {
+                HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode\n");
+                v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
+                    |= EFER_LMA;
+                __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
+                vm_entry_value |= VM_ENTRY_IA32E_MODE;
+                __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
+            }
         }
 #endif
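
This hunk covers the guest setting CR0.PG with EFER.LME already set: long
mode requires CR4.PAE, so the write faults if PAE is clear; otherwise
EFER.LMA and the VM-entry IA-32e mode control are set so the next VM entry
runs the guest in long mode.  A sketch of the decision (illustrative names
and return contract; the VMCS write is left to the caller):

    /* Sketch of the long-mode activation check above.  Bit positions
     * are architectural; the function name is illustrative. */
    #define X86_CR4_PAE (1UL << 5)
    #define EFER_LMA    (1UL << 10)

    /* Returns 1 and sets LMA on success; 0 means the caller should
     * inject #GP(0).  On success the caller must also set the
     * VM_ENTRY_IA32E_MODE entry control. */
    static int activate_long_mode(unsigned long cr4_shadow,
                                  unsigned long *efer_shadow)
    {
        if ( !(cr4_shadow & X86_CR4_PAE) )
            return 0;
        *efer_shadow |= EFER_LMA;
        return 1;
    }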
 
@@ -1546,11 +1539,10 @@ static int vmx_set_cr0(unsigned long val
              * Disable paging here.
              * Same to PE == 1 && PG == 0
              */
-            if ( test_bit(VMX_CPU_STATE_LMA_ENABLED,
-                          &v->arch.hvm_vmx.cpu_state) )
+            if ( vmx_long_mode_enabled(v) )
             {
-                clear_bit(VMX_CPU_STATE_LMA_ENABLED,
-                          &v->arch.hvm_vmx.cpu_state);
+                v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
+                    &= ~EFER_LMA;
                 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
                 vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
                 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
@@ -1559,22 +1551,19 @@ static int vmx_set_cr0(unsigned long val
 
         if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
         {
-            set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &v->arch.hvm_vmx.cpu_state);
             __vmread(GUEST_RIP, &eip);
             HVM_DBG_LOG(DBG_LEVEL_1,
                         "Transfering control to vmxassist %%eip 0x%lx\n", eip);
             return 0; /* do not update eip! */
         }
-    } else if ( test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
-                         &v->arch.hvm_vmx.cpu_state) )
+    }
+    else if ( v->arch.hvm_vmx.vmxassist_enabled )
     {
         __vmread(GUEST_RIP, &eip);
         HVM_DBG_LOG(DBG_LEVEL_1,
                     "Enabling CR0.PE at %%eip 0x%lx\n", eip);
         if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
         {
-            clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
-                      &v->arch.hvm_vmx.cpu_state);
             __vmread(GUEST_RIP, &eip);
             HVM_DBG_LOG(DBG_LEVEL_1,
                         "Restoring to %%eip 0x%lx\n", eip);
@@ -1705,8 +1694,6 @@ static int mov_to_cr(int gp, int cr, str
 
         if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
         {
-            set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
-
             if ( vmx_pgbit_test(v) )
             {
                 /* The guest is a 32-bit PAE guest. */
@@ -1745,14 +1732,14 @@ static int mov_to_cr(int gp, int cr, str
 #endif
             }
         }
-        else if ( value & X86_CR4_PAE )
-            set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
-        else
-        {
-            if ( test_bit(VMX_CPU_STATE_LMA_ENABLED, &v->arch.hvm_vmx.cpu_state) )
+        else if ( !(value & X86_CR4_PAE) )
+        {
+            if ( unlikely(vmx_long_mode_enabled(v)) )
+            {
+                HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
+                            "EFER.LMA is set\n");
                 vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-
-            clear_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
+            }
         }
 
         __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
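
The CR4 path above applies the inverse rule: clearing CR4.PAE while long
mode is active (EFER.LMA set) is architecturally invalid, so the write is
refused with #GP(0).  Sketch (illustrative names):

    /* Sketch of the CR4 write rule above; returns 0 when the write
     * must be rejected with #GP(0). */
    #define X86_CR4_PAE (1UL << 5)

    static int cr4_write_ok(unsigned long new_cr4, int long_mode_active)
    {
        return !(long_mode_active && !(new_cr4 & X86_CR4_PAE));
    }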
diff -r b3cba293e61a -r 646a120334ef xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Thu Oct 05 16:05:12 2006 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Thu Oct 05 16:21:39 2006 +0100
@@ -28,19 +28,6 @@ extern void vmcs_dump_vcpu(void);
 extern void vmcs_dump_vcpu(void);
 extern void vmx_init_vmcs_config(void);
 extern void setup_vmcs_dump(void);
-
-enum {
-    VMX_CPU_STATE_PAE_ENABLED=0,
-    VMX_CPU_STATE_LME_ENABLED,
-    VMX_CPU_STATE_LMA_ENABLED,
-    VMX_CPU_STATE_ASSIST_ENABLED,
-};
-
-#define VMX_LONG_GUEST(ed)    \
-  (test_bit(VMX_CPU_STATE_LMA_ENABLED, &ed->arch.hvm_vmx.cpu_state))
-
-#define VMX_PAE_GUEST(ed)       \
-  (test_bit(VMX_CPU_STATE_PAE_ENABLED, &ed->arch.hvm_vmx.cpu_state))
 
 struct vmcs_struct {
     u32 vmcs_revision_id;
@@ -93,10 +80,10 @@ struct arch_vmx_struct {
     unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
     unsigned long        cpu_cr2; /* save CR2 */
     unsigned long        cpu_cr3;
-    unsigned long        cpu_state;
     unsigned long        cpu_based_exec_control;
     struct vmx_msr_state msr_content;
     void                *io_bitmap_a, *io_bitmap_b;
+    unsigned long        vmxassist_enabled:1; 
 };
 
 #define vmx_schedule_tail(next)         \
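
With the mode predicates derived from the shadows, arch_vmx_struct keeps
only a single residual flag: a one-bit bitfield recording whether vmxassist
currently owns the guest.  A reduced sketch of the resulting shape (most
fields elided; field names as in the hunk above):

    /* Reduced sketch of the struct after the patch; only
     * vmxassist_enabled remains as cached mode state. */
    struct arch_vmx_sketch {
        unsigned long cpu_shadow_cr0;       /* guest CR0 read shadow   */
        unsigned long cpu_shadow_cr4;       /* guest CR4 read shadow   */
        unsigned long vmxassist_enabled:1;  /* in vmxassist right now? */
    };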
diff -r b3cba293e61a -r 646a120334ef xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Thu Oct 05 16:05:12 2006 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Thu Oct 05 16:21:39 2006 +0100
@@ -335,26 +335,30 @@ static inline int __vmxon (u64 addr)
     return rc;
 }
 
-/* Works only for vcpu == current */
 static inline int vmx_paging_enabled(struct vcpu *v)
 {
     unsigned long cr0;
-
     __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
-    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
-}
-
-/* Works only for vcpu == current */
+    return ((cr0 & (X86_CR0_PE|X86_CR0_PG)) == (X86_CR0_PE|X86_CR0_PG));
+}
+
+static inline int vmx_pae_enabled(struct vcpu *v)
+{
+    unsigned long cr4;
+    __vmread_vcpu(v, CR4_READ_SHADOW, &cr4);
+    return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
+}
+
 static inline int vmx_long_mode_enabled(struct vcpu *v)
 {
-    ASSERT(v == current);
-    return VMX_LONG_GUEST(current);
-}
-
-static inline int vmx_pae_enabled(struct vcpu *v)
-{
-    ASSERT(v == current);
-    return VMX_PAE_GUEST(current);
+    u64 efer = v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER];
+    return efer & EFER_LMA;
+}
+
+static inline int vmx_lme_is_set(struct vcpu *v)
+{
+    u64 efer = v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER];
+    return efer & EFER_LME;
 }
 
 /* Works only for vcpu == current */
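
The rewritten vmx_paging_enabled() above folds the two tests into one
masked compare; both forms require CR0.PE and CR0.PG together.  A
standalone equivalence check (architectural bit positions, illustrative
test harness):

    /* Self-contained check that the rewritten vmx_paging_enabled()
     * test is equivalent to the old one. */
    #include <assert.h>

    #define X86_CR0_PE (1UL << 0)
    #define X86_CR0_PG (1UL << 31)

    static int paging_old(unsigned long cr0)
    { return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG); }

    static int paging_new(unsigned long cr0)
    { return (cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG); }

    int main(void)
    {
        unsigned long samples[] = { 0, X86_CR0_PE, X86_CR0_PG,
                                    X86_CR0_PE | X86_CR0_PG };
        for (unsigned i = 0; i < 4; i++)
            assert(!!paging_old(samples[i]) == paging_new(samples[i]));
        return 0;
    }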

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog