[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] Fix VMX EFER write logic. Also some cleanups.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 5b56d1e1ac8b9aa8d6b13545de772936efdfcaae
# Parent  e3c7b1e974597596c842837fddba3658ee13670b
Fix VMX EFER write logic. Also some cleanups.

Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>

diff -r e3c7b1e97459 -r 5b56d1e1ac8b xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Sun Mar 19 11:41:28 2006
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Sun Mar 19 11:48:51 2006
@@ -166,113 +166,139 @@
 #define IS_CANO_ADDRESS(add) 1
 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
 {
-    u64     msr_content = 0;
-    struct vcpu *vc = current;
-    struct vmx_msr_state * msr = &vc->arch.hvm_vmx.msr_content;
-    switch(regs->ecx){
+    u64 msr_content = 0;
+    struct vcpu *v = current;
+    struct vmx_msr_state *msr = &v->arch.hvm_vmx.msr_content;
+
+    switch ( regs->ecx ) {
     case MSR_EFER:
+        HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content 0x%"PRIx64, msr_content);
         msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
-        HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %"PRIx64"\n", msr_content);
-        if (test_bit(VMX_CPU_STATE_LME_ENABLED,
-                     &vc->arch.hvm_vmx.cpu_state))
-            msr_content |= 1 << _EFER_LME;
-
-        if (VMX_LONG_GUEST(vc))
-            msr_content |= 1 << _EFER_LMA;
-        break;
+
+        /* the following code may not be needed */
+        if ( test_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state) )
+            msr_content |= EFER_LME;
+        else
+            msr_content &= ~EFER_LME;
+
+        if ( VMX_LONG_GUEST(v) )
+            msr_content |= EFER_LMA;
+        else
+            msr_content &= ~EFER_LMA;
+        break;
+
     case MSR_FS_BASE:
-        if (!(VMX_LONG_GUEST(vc)))
+        if ( !(VMX_LONG_GUEST(v)) )
             /* XXX should it be GP fault */
             domain_crash_synchronous();
+
         __vmread(GUEST_FS_BASE, &msr_content);
         break;
+
     case MSR_GS_BASE:
-        if (!(VMX_LONG_GUEST(vc)))
+        if ( !(VMX_LONG_GUEST(v)) )
             domain_crash_synchronous();
+
         __vmread(GUEST_GS_BASE, &msr_content);
         break;
+
     case MSR_SHADOW_GS_BASE:
         msr_content = msr->shadow_gs;
         break;
 
-        CASE_READ_MSR(STAR);
-        CASE_READ_MSR(LSTAR);
-        CASE_READ_MSR(CSTAR);
-        CASE_READ_MSR(SYSCALL_MASK);
+    CASE_READ_MSR(STAR);
+    CASE_READ_MSR(LSTAR);
+    CASE_READ_MSR(CSTAR);
+    CASE_READ_MSR(SYSCALL_MASK);
+
     default:
         return 0;
     }
-    HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %"PRIx64"\n",
-                msr_content);
+
+    HVM_DBG_LOG(DBG_LEVEL_2, "msr_content: 0x%"PRIx64, msr_content);
+
     regs->eax = msr_content & 0xffffffff;
     regs->edx = msr_content >> 32;
+
     return 1;
 }
 
 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
 {
-    u64     msr_content = regs->eax | ((u64)regs->edx << 32);
-    struct vcpu *vc = current;
-    struct vmx_msr_state * msr = &vc->arch.hvm_vmx.msr_content;
-    struct vmx_msr_state * host_state =
-        &percpu_msr[smp_processor_id()];
-
-    HVM_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx "
-                "msr_content %"PRIx64"\n",
+    u64 msr_content = regs->eax | ((u64)regs->edx << 32);
+    struct vcpu *v = current;
+    struct vmx_msr_state *msr = &v->arch.hvm_vmx.msr_content;
+    struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
+
+    HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%lx msr_content 0x%"PRIx64"\n",
                 (unsigned long)regs->ecx, msr_content);
 
-    switch (regs->ecx){
+    switch ( regs->ecx ) {
     case MSR_EFER:
         /* offending reserved bit will cause #GP */
-        if ( msr_content &
-                ~( EFER_LME | EFER_LMA | EFER_NX | EFER_SCE ) )
-             vmx_inject_exception(vc, TRAP_gp_fault, 0);
-
-        if ((msr_content & EFER_LME) ^
-            test_bit(VMX_CPU_STATE_LME_ENABLED,
-                     &vc->arch.hvm_vmx.cpu_state)){
-            if ( vmx_paging_enabled(vc) ||
+        if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
+        {
+            printk("trying to set reserved bit in EFER\n");
+            vmx_inject_exception(v, TRAP_gp_fault, 0);
+            return 0;
+        }
+
+        /* LME: 0 -> 1 */
+        if ( msr_content & EFER_LME &&
+             !test_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state) )
+        {
+            if ( vmx_paging_enabled(v) ||
                  !test_bit(VMX_CPU_STATE_PAE_ENABLED,
-                           &vc->arch.hvm_vmx.cpu_state)) {
-                vmx_inject_exception(vc, TRAP_gp_fault, 0);
+                           &v->arch.hvm_vmx.cpu_state) )
+            {
+                printk("trying to set LME bit when "
+                       "in paging mode or PAE bit is not set\n");
+                vmx_inject_exception(v, TRAP_gp_fault, 0);
+                return 0;
             }
-        }
-        if (msr_content & EFER_LME)
-            set_bit(VMX_CPU_STATE_LME_ENABLED,
-                    &vc->arch.hvm_vmx.cpu_state);
-
-        msr->msr_items[VMX_INDEX_MSR_EFER] =
-            msr_content;
+
+            set_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state);
+        }
+
+        msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
         break;
 
     case MSR_FS_BASE:
     case MSR_GS_BASE:
-        if (!(VMX_LONG_GUEST(vc)))
+        if ( !(VMX_LONG_GUEST(v)) )
             domain_crash_synchronous();
-        if (!IS_CANO_ADDRESS(msr_content)){
+
+        if ( !IS_CANO_ADDRESS(msr_content) )
+        {
             HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
-            vmx_inject_exception(vc, TRAP_gp_fault, 0);
-        }
-        if (regs->ecx == MSR_FS_BASE)
+            vmx_inject_exception(v, TRAP_gp_fault, 0);
+            return 0;
+        }
+
+        if ( regs->ecx == MSR_FS_BASE )
             __vmwrite(GUEST_FS_BASE, msr_content);
         else
             __vmwrite(GUEST_GS_BASE, msr_content);
+
         break;
 
     case MSR_SHADOW_GS_BASE:
-        if (!(VMX_LONG_GUEST(vc)))
+        if ( !(VMX_LONG_GUEST(v)) )
             domain_crash_synchronous();
-        vc->arch.hvm_vmx.msr_content.shadow_gs = msr_content;
+
+        v->arch.hvm_vmx.msr_content.shadow_gs = msr_content;
         wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
         break;
 
-        CASE_WRITE_MSR(STAR);
-        CASE_WRITE_MSR(LSTAR);
-        CASE_WRITE_MSR(CSTAR);
-        CASE_WRITE_MSR(SYSCALL_MASK);
+    CASE_WRITE_MSR(STAR);
+    CASE_WRITE_MSR(LSTAR);
+    CASE_WRITE_MSR(CSTAR);
+    CASE_WRITE_MSR(SYSCALL_MASK);
+
     default:
         return 0;
     }
+
     return 1;
 }
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.