[Xen-changelog] Clean up some vmx code.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 0349fb4de335eb7497f8d33107b83fd73291e441
# Parent  5bf4d9a9694fe45f19dff8b1b04936358483f2f2
Clean up some vmx code.

Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
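
Beyond the whitespace and coding-style fixes, the one structural change is in
vmx.c's CR0 write handler: the long-mode/PAE checks on the "guest enables
paging" path are flattened out of the nested #ifdef blocks, and the CR4.PAE
update loses its artificial inner scope. A minimal sketch of the resulting
control flow (plain C; guest_lme(), guest_pae() and the other helpers are
hypothetical stand-ins for the real test_bit()/__vmread()/__vmwrite() calls,
not actual Xen functions):

    /* Hypothetical sketch of the reorganized paging-enable path. */
    if ( (value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled )
    {
        unsigned long cr4;

        if ( guest_lme() && !guest_pae() )
            inject_gp();                     /* long mode requires PAE */

        if ( guest_lme() )
        {
            set_lma_and_ia32e_entry_ctls();  /* enter IA-32e mode */
            set_shadow_paging_levels(4);
        }
        else
            set_shadow_paging_levels(2);     /* 32-bit guest */

        cr4 = read_guest_cr4();
        if ( !(cr4 & X86_CR4_PAE) && guest_pae() )
            write_guest_cr4(cr4 | X86_CR4_PAE);  /* mirror PAE into CR4 */
    }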

diff -r 5bf4d9a9694f -r 0349fb4de335 tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c        Thu Feb 23 10:31:01 2006
+++ b/tools/libxc/xc_hvm_build.c        Thu Feb 23 10:34:11 2006
@@ -20,7 +20,7 @@
 #define L3_PROT (_PAGE_PRESENT)
 #endif
 
-#define E820MAX        128
+#define E820MAX     128
 
 #define E820_RAM          1
 #define E820_RESERVED     2
@@ -149,7 +149,7 @@
         PAGE_SIZE,
         PROT_READ|PROT_WRITE,
         pfn_list[HVM_INFO_PFN]);
-    
+
     if ( va_map == NULL )
         return -1;
 
@@ -365,8 +365,8 @@
 
     if ( !strstr(xen_caps, "hvm") )
     {
-       PERROR("CPU doesn't support HVM extensions or "
-              "the extensions are not enabled");
+        PERROR("CPU doesn't support HVM extensions or "
+               "the extensions are not enabled");
         goto error_out;
     }
 
diff -r 5bf4d9a9694f -r 0349fb4de335 tools/libxc/xc_ia64_stubs.c
--- a/tools/libxc/xc_ia64_stubs.c       Thu Feb 23 10:31:01 2006
+++ b/tools/libxc/xc_ia64_stubs.c       Thu Feb 23 10:34:11 2006
@@ -16,7 +16,7 @@
 #undef __IA64_UL
 #define __IA64_UL(x)           ((unsigned long)(x))
 #undef __ASSEMBLY__
-                                                                               
 
+
 unsigned long xc_ia64_fpsr_default(void)
 {
         return FPSR_DEFAULT;
diff -r 5bf4d9a9694f -r 0349fb4de335 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Feb 23 10:31:01 2006
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Feb 23 10:34:11 2006
@@ -642,7 +642,7 @@
 }
 
 /* Reserved bits: [31:15], [12:11], [9], [6], [2:1] */
-#define VMX_VCPU_CPUID_L1_RESERVED 0xffff9a46 
+#define VMX_VCPU_CPUID_L1_RESERVED 0xffff9a46
 
 static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs)
 {
@@ -1185,8 +1185,12 @@
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
 
-    if ((value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled) {
+    if ( (value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled )
+    {
+        unsigned long cr4;
+
         /*
+         * Trying to enable guest paging.
          * The guest CR3 must be pointing to the guest physical address.
          */
         if ( !VALID_MFN(mfn = get_mfn_from_gpfn(
@@ -1198,52 +1202,51 @@
         }
 
 #if defined(__x86_64__)
-        if (test_bit(VMX_CPU_STATE_LME_ENABLED,
-                     &v->arch.hvm_vmx.cpu_state) &&
-            !test_bit(VMX_CPU_STATE_PAE_ENABLED,
-                      &v->arch.hvm_vmx.cpu_state)){
-            HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
+        if ( test_bit(VMX_CPU_STATE_LME_ENABLED,
+                      &v->arch.hvm_vmx.cpu_state) &&
+             !test_bit(VMX_CPU_STATE_PAE_ENABLED,
+                       &v->arch.hvm_vmx.cpu_state) )
+        {
+            HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enabled\n");
             vmx_inject_exception(v, TRAP_gp_fault, 0);
         }
-        if (test_bit(VMX_CPU_STATE_LME_ENABLED,
-                     &v->arch.hvm_vmx.cpu_state)){
-            /* Here the PAE is should to be opened */
-            HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n");
+
+        if ( test_bit(VMX_CPU_STATE_LME_ENABLED,
+                      &v->arch.hvm_vmx.cpu_state) )
+        {
+            /* PAE should already be enabled here */
+            HVM_DBG_LOG(DBG_LEVEL_1, "Enable long mode\n");
             set_bit(VMX_CPU_STATE_LMA_ENABLED,
                     &v->arch.hvm_vmx.cpu_state);
+
             __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
             vm_entry_value |= VM_ENTRY_CONTROLS_IA32E_MODE;
             __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
 
-#if CONFIG_PAGING_LEVELS >= 4
-            if(!shadow_set_guest_paging_levels(v->domain, 4)) {
+            if ( !shadow_set_guest_paging_levels(v->domain, 4) ) {
+                printk("Unsupported guest paging levels\n");
+                domain_crash_synchronous(); /* need to take a clean path */
+            }
+        }
+        else
+#endif  /* __x86_64__ */
+        {
+#if CONFIG_PAGING_LEVELS >= 3
+            if ( !shadow_set_guest_paging_levels(v->domain, 2) ) {
                 printk("Unsupported guest paging levels\n");
                 domain_crash_synchronous(); /* need to take a clean path */
             }
 #endif
         }
-        else
-#endif  /* __x86_64__ */
+
+        /* update CR4's PAE if needed */
+        __vmread(GUEST_CR4, &cr4);
+        if ( (!(cr4 & X86_CR4_PAE)) &&
+             test_bit(VMX_CPU_STATE_PAE_ENABLED,
+                      &v->arch.hvm_vmx.cpu_state) )
         {
-#if CONFIG_PAGING_LEVELS >= 3
-            if(!shadow_set_guest_paging_levels(v->domain, 2)) {
-                printk("Unsupported guest paging levels\n");
-                domain_crash_synchronous(); /* need to take a clean path */
-            }
-#endif
-        }
-
-        {
-            unsigned long crn;
-            /* update CR4's PAE if needed */
-            __vmread(GUEST_CR4, &crn);
-            if ( (!(crn & X86_CR4_PAE)) &&
-                 test_bit(VMX_CPU_STATE_PAE_ENABLED,
-                          &v->arch.hvm_vmx.cpu_state) )
-            {
-                HVM_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
-                __vmwrite(GUEST_CR4, crn | X86_CR4_PAE);
-            }
+            HVM_DBG_LOG(DBG_LEVEL_1, "enable PAE in cr4\n");
+            __vmwrite(GUEST_CR4, cr4 | X86_CR4_PAE);
         }
 
         /*
@@ -1263,8 +1266,8 @@
                     v->arch.hvm_vmx.cpu_cr3, mfn);
     }
 
-    if(!((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled)
-        if(v->arch.hvm_vmx.cpu_cr3) {
+    if ( !((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled )
+        if ( v->arch.hvm_vmx.cpu_cr3 ) {
             put_page(mfn_to_page(get_mfn_from_gpfn(
                       v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
             v->arch.guest_table = mk_pagetable(0);
@@ -1275,7 +1278,8 @@
      * real-mode by performing a world switch to VMXAssist whenever
      * a partition disables the CR0.PE bit.
      */
-    if ((value & X86_CR0_PE) == 0) {
+    if ( (value & X86_CR0_PE) == 0 )
+    {
         if ( value & X86_CR0_PG ) {
             /* inject GP here */
             vmx_inject_exception(v, TRAP_gp_fault, 0);
@@ -1285,8 +1289,9 @@
              * Disable paging here.
             * Same as PE == 1 && PG == 0
              */
-            if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
-                         &v->arch.hvm_vmx.cpu_state)){
+            if ( test_bit(VMX_CPU_STATE_LMA_ENABLED,
+                          &v->arch.hvm_vmx.cpu_state) )
+            {
                 clear_bit(VMX_CPU_STATE_LMA_ENABLED,
                           &v->arch.hvm_vmx.cpu_state);
                 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
@@ -1296,19 +1301,21 @@
         }
 
         clear_all_shadow_status(v->domain);
-        if (vmx_assist(v, VMX_ASSIST_INVOKE)) {
+        if ( vmx_assist(v, VMX_ASSIST_INVOKE) ) {
             set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &v->arch.hvm_vmx.cpu_state);
             __vmread(GUEST_RIP, &eip);
            HVM_DBG_LOG(DBG_LEVEL_1,
                        "Transferring control to vmxassist %%eip 0x%lx\n", eip);
             return 0; /* do not update eip! */
         }
-    } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
-                        &v->arch.hvm_vmx.cpu_state)) {
+    } else if ( test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
+                         &v->arch.hvm_vmx.cpu_state) )
+    {
         __vmread(GUEST_RIP, &eip);
         HVM_DBG_LOG(DBG_LEVEL_1,
                     "Enabling CR0.PE at %%eip 0x%lx\n", eip);
-        if (vmx_assist(v, VMX_ASSIST_RESTORE)) {
+        if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
+        {
             clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
                       &v->arch.hvm_vmx.cpu_state);
             __vmread(GUEST_RIP, &eip);
@@ -1438,15 +1445,13 @@
     }
     case 4: /* CR4 */
     {
-        unsigned long old_cr4;
-
-        __vmread(CR4_READ_SHADOW, &old_cr4);
-
-        if ( value & X86_CR4_PAE && !(old_cr4 & X86_CR4_PAE) )
+        __vmread(CR4_READ_SHADOW, &old_cr);
+
+        if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
         {
             set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
 
-            if ( vmx_pgbit_test(v) ) 
+            if ( vmx_pgbit_test(v) )
             {
                 /* The guest is 32 bit. */
 #if CONFIG_PAGING_LEVELS >= 4
@@ -1460,7 +1465,7 @@
 
                 if ( !VALID_MFN(mfn = get_mfn_from_gpfn(
                                     v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
-                     !get_page(mfn_to_page(mfn), v->domain) ) 
+                     !get_page(mfn_to_page(mfn), v->domain) )
                 {
                     printk("Invalid CR3 value = %lx", v->arch.hvm_vmx.cpu_cr3);
                     domain_crash_synchronous(); /* need to take a clean path */
@@ -1489,12 +1494,12 @@
                HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
                             v->arch.hvm_vmx.cpu_cr3, mfn);
 #endif
-            } 
+            }
             else
             {
                 /*  The guest is 64 bit. */
 #if CONFIG_PAGING_LEVELS >= 4
-                if ( !shadow_set_guest_paging_levels(v->domain, 4) ) 
+                if ( !shadow_set_guest_paging_levels(v->domain, 4) )
                 {
                     printk("Unsupported guest paging levels\n");
                     domain_crash_synchronous(); /* need to take a clean path */
@@ -1512,7 +1517,6 @@
             clear_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
         }
 
-        __vmread(CR4_READ_SHADOW, &old_cr);
         __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
         __vmwrite(CR4_READ_SHADOW, value);
 
diff -r 5bf4d9a9694f -r 0349fb4de335 xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Thu Feb 23 10:31:01 2006
+++ b/xen/arch/x86/shadow.c     Thu Feb 23 10:34:11 2006
@@ -3942,9 +3942,7 @@
      * on handling the #PF as such.
      */
     if ( (mfn = get_mfn_from_gpfn(vpa >> PAGE_SHIFT)) == INVALID_MFN )
-    {
-         goto fail;
-    }
+        return 0;
 
     shadow_lock(d);
 
@@ -3993,9 +3991,6 @@
 
     shadow_unlock(d);
     return EXCRET_fault_fixed;
-
-fail:
-    return 0;
 
 nomem:
     shadow_direct_map_clean(d);
diff -r 5bf4d9a9694f -r 0349fb4de335 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Thu Feb 23 10:31:01 2006
+++ b/xen/arch/x86/shadow32.c   Thu Feb 23 10:34:11 2006
@@ -1039,9 +1039,7 @@
      * on handling the #PF as such.
      */
     if ( (mfn = get_mfn_from_gpfn(vpa >> PAGE_SHIFT)) == INVALID_MFN )
-    {
-         goto fail;
-    }
+        return 0;
 
     shadow_lock(d);
 
@@ -1077,9 +1075,6 @@
 
     shadow_unlock(d);
     return EXCRET_fault_fixed;
-
-fail:
-    return 0;
 
 nomem:
     shadow_direct_map_clean(d);
