
[Xen-changelog] [xen stable-4.1] vmx: Simplify cr0 update handling by deferring cr4 changes to the cr4 handler.



commit da7e3cdf0a1c2440886383e0e7f826c88796b8ca
Author:     Keir Fraser <keir@xxxxxxx>
AuthorDate: Tue Apr 9 16:25:20 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Apr 9 16:25:20 2013 +0200

    vmx: Simplify cr0 update handling by deferring cr4 changes to the cr4 handler.
    
    Signed-off-by: Keir Fraser <keir@xxxxxxx>
    master commit: 1453984eab1297559e016d4e907a27e55997191c
    master date: 2013-01-30 09:15:39 -0800
---
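For readers skimming the diff: the shape of the change is that the CR0 path stops
writing GUEST_CR4 itself and instead ends with a recursive vmx_update_guest_cr(v, 4),
leaving the CR4 case as the single place that derives hw_cr[4] (including forcing
X86_CR4_VME while the guest runs with CR0.PE clear). The following is a minimal
standalone sketch of that control flow only, not Xen code; the toy_vcpu structure
and update_cr*() helpers are invented for illustration.

#include <stdio.h>
#include <stdint.h>

/* Toy vcpu state: guest-visible CRs and the values we would program
 * into hardware (hw_cr). Field names only loosely mirror the Xen ones. */
struct toy_vcpu {
    uint64_t guest_cr[5];
    uint64_t hw_cr[5];
};

#define CR0_PE  (1u << 0)
#define CR4_VME (1u << 1)

static void update_cr(struct toy_vcpu *v, unsigned int cr);

/* CR4 handler: the single place that derives hw CR4 from guest CR4
 * and the current CR0 state (real mode needs VME forced on here). */
static void update_cr4(struct toy_vcpu *v)
{
    v->hw_cr[4] = v->guest_cr[4];
    if ( !(v->guest_cr[0] & CR0_PE) )
        v->hw_cr[4] |= CR4_VME;   /* emulating real mode via vm86 */
    printf("hw CR4 = %#llx\n", (unsigned long long)v->hw_cr[4]);
}

/* CR0 handler: commits hw CR0, then defers the CR4 fixup to the CR4
 * handler instead of open-coding the VME juggling in the CR0 path. */
static void update_cr0(struct toy_vcpu *v)
{
    v->hw_cr[0] = v->guest_cr[0];
    printf("hw CR0 = %#llx\n", (unsigned long long)v->hw_cr[0]);

    /* Changing CR0 can change some bits in real CR4. */
    update_cr(v, 4);
}

static void update_cr(struct toy_vcpu *v, unsigned int cr)
{
    switch ( cr )
    {
    case 0: update_cr0(v); break;
    case 4: update_cr4(v); break;
    }
}

int main(void)
{
    struct toy_vcpu v = { .guest_cr = { 0 } };

    update_cr(&v, 0);            /* CR0.PE=0: CR4 handler forces VME  */
    v.guest_cr[0] |= CR0_PE;
    update_cr(&v, 0);            /* CR0.PE=1: VME no longer forced    */
    return 0;
}
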
 xen/arch/x86/hvm/vmx/vmx.c |   15 +++++----------
 1 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index e9f1323..88eeb9d 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1092,20 +1092,18 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
 
         if ( paging_mode_hap(v->domain) )
         {
-            /* We manage GUEST_CR3 when guest CR0.PE is zero or when cr3 memevents are on */
+            /* Manage GUEST_CR3 when CR0.PE=0. */
             uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
                                  CPU_BASED_CR3_STORE_EXITING);
             v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
             if ( !hvm_paging_enabled(v) )
                 v->arch.hvm_vmx.exec_control |= cr3_ctls;
 
+            /* Trap CR3 updates if CR3 memory events are enabled. */
            if ( v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR3] )
                 v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
 
             vmx_update_cpu_exec_control(v);
-
-            /* Changing CR0.PE can change some bits in real CR4. */
-            vmx_update_guest_cr(v, 4);
         }
 
         if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
@@ -1135,8 +1133,6 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
             {
                 for ( s = x86_seg_cs ; s <= x86_seg_tr ; s++ )
                     vmx_set_segment_register(v, s, &reg[s]);
-                v->arch.hvm_vcpu.hw_cr[4] |= X86_CR4_VME;
-                __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
                 v->arch.hvm_vmx.exception_bitmap = 0xffffffff;
                 vmx_update_exception_bitmap(v);
             }
@@ -1146,10 +1142,6 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
                     if ( !(v->arch.hvm_vmx.vm86_segment_mask & (1<<s)) )
                         vmx_set_segment_register(
                             v, s, &v->arch.hvm_vmx.vm86_saved_seg[s]);
-                v->arch.hvm_vcpu.hw_cr[4] =
-                    ((v->arch.hvm_vcpu.hw_cr[4] & ~X86_CR4_VME)
-                     |(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_VME));
-                __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
                 v->arch.hvm_vmx.exception_bitmap = HVM_TRAP_MASK
                           | (paging_mode_hap(v->domain) ?
                              0 : (1U << TRAP_page_fault))
@@ -1163,6 +1155,9 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
             v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
         __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
         __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+
+        /* Changing CR0 can change some bits in real CR4. */
+        vmx_update_guest_cr(v, 4);
         break;
     }
     case 2:
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.1

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog