
[Xen-changelog] [xen stable-4.6] x86/HVM: don't cause #NM to be raised in Xen



commit 598a375f5230d91ac88e76a9f4b4dde4a62a4c5b
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Jun 28 12:28:21 2018 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Jun 28 12:28:21 2018 +0200

    x86/HVM: don't cause #NM to be raised in Xen
    
    The changes for XSA-267 did not touch management of CR0.TS for HVM
    guests. In fully eager mode this bit should never be set while the
    respective vCPUs are active, or else hvmemul_get_fpu() might leave it
    wrongly set, leading to #NM in hypervisor context.
    
    {svm,vmx}_fpu_enter() and {svm,vmx}_fpu_dirty_intercept() become
    unreachable this way. Explicit {svm,vmx}_fpu_leave() invocations need
    to be guarded now.
    
    With no CR0.TS management necessary in fully eager mode, there's also no
    need anymore to intercept #NM.
    
    Reported-by: Charles Arnold <carnold@xxxxxxxx>
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
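For context, a rough sketch of the failure mode described above (simplified,
not Xen source; helper names here are illustrative, ring-0 only): with CR0.TS
left set, the next x87/SSE/FXSAVE instruction executed in hypervisor context
raises #NM (vector 7), which fully eager mode neither intercepts nor expects.

    /*
     * Illustrative sketch, not Xen source: CR0.TS (bit 3) makes the CPU
     * raise #NM on the next x87/MMX/SSE/FXSAVE instruction.  Lazy FPU
     * switching relies on exactly that; with fully eager switching Xen
     * no longer expects this fault, so a stray TS bit is fatal.
     */
    #define X86_CR0_TS (1UL << 3)

    static inline unsigned long sketch_read_cr0(void)
    {
        unsigned long cr0;

        asm volatile ( "mov %%cr0, %0" : "=r" (cr0) );
        return cr0;
    }

    static inline void sketch_write_cr0(unsigned long cr0)
    {
        asm volatile ( "mov %0, %%cr0" :: "r" (cr0) );
    }

    static void sketch_stray_ts_faults(void)
    {
        static unsigned char fxsave_area[512] __attribute__((aligned(16)));

        sketch_write_cr0(sketch_read_cr0() | X86_CR0_TS);

        /* With TS still set, this ring-0 fxsave raises #NM immediately. */
        asm volatile ( "fxsave %0" : "=m" (fxsave_area) );
    }
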
 xen/arch/x86/hvm/svm/svm.c  | 8 ++++++--
 xen/arch/x86/hvm/svm/vmcb.c | 4 ++--
 xen/arch/x86/hvm/vmx/vmcs.c | 6 ++++--
 xen/arch/x86/hvm/vmx/vmx.c  | 8 ++++++--
 4 files changed, 18 insertions(+), 8 deletions(-)
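
All four files below apply the same pattern around v->arch.fully_eager_fpu.
As a rough mental model (a simplified sketch with illustrative stub helpers,
not the actual context-switch code), the two FPU switching schemes differ as
follows:

    /*
     * Simplified sketch, not Xen source: the two FPU switching models the
     * hunks distinguish via v->arch.fully_eager_fpu.  The vcpu layout and
     * helpers are illustrative stubs.
     */
    #include <stdbool.h>

    struct sketch_vcpu {
        bool fully_eager_fpu;
        bool nm_intercepted;
        bool hw_cr0_ts;                /* stands in for hardware CR0.TS */
    };

    static void sketch_restore_guest_fpu(struct sketch_vcpu *v)
    {
        (void)v;                       /* xrstor/fxrstor of guest state */
    }

    static void sketch_fpu_ctxt_switch_to(struct sketch_vcpu *v)
    {
        if ( v->fully_eager_fpu )
        {
            /* Fully eager: reload the guest state up front; CR0.TS stays
             * clear and #NM never needs intercepting or handling. */
            sketch_restore_guest_fpu(v);
            v->hw_cr0_ts = false;
            v->nm_intercepted = false;
        }
        else
        {
            /* Lazy: defer the reload, fence the FPU off behind CR0.TS and
             * let the #NM intercept restore the state on first use. */
            v->hw_cr0_ts = true;
            v->nm_intercepted = true;
        }
    }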

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 6253944446..14699fcfee 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -548,7 +548,10 @@ void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
         if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
         {
             if ( v != current )
-                hw_cr0_mask |= X86_CR0_TS;
+            {
+                if ( !v->arch.fully_eager_fpu )
+                    hw_cr0_mask |= X86_CR0_TS;
+            }
             else if ( vmcb_get_cr0(vmcb) & X86_CR0_TS )
                 svm_fpu_enter(v);
         }
@@ -984,7 +987,8 @@ static void svm_ctxt_switch_from(struct vcpu *v)
     if ( unlikely((read_efer() & EFER_SVME) == 0) )
         return;
 
-    svm_fpu_leave(v);
+    if ( !v->arch.fully_eager_fpu )
+        svm_fpu_leave(v);
 
     svm_save_dr(v);
     svm_lwp_save(v);
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 5682ff6842..6dc41dc400 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -194,8 +194,8 @@ static int construct_vmcb(struct vcpu *v)
     paging_update_paging_modes(v);
 
     vmcb->_exception_intercepts =
-        HVM_TRAP_MASK
-        | (1U << TRAP_no_device);
+        HVM_TRAP_MASK |
+        (v->arch.fully_eager_fpu ? 0 : (1U << TRAP_no_device));
 
     if ( paging_mode_hap(v->domain) )
     {
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index cf15a62898..60ef8a8c5b 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1134,7 +1134,9 @@ static int construct_vmcs(struct vcpu *v)
     __vmwrite(HOST_GS_BASE, 0);
 
     /* Host control registers. */
-    v->arch.hvm_vmx.host_cr0 = read_cr0() | X86_CR0_TS;
+    v->arch.hvm_vmx.host_cr0 = read_cr0() & ~X86_CR0_TS;
+    if ( !v->arch.fully_eager_fpu )
+        v->arch.hvm_vmx.host_cr0 |= X86_CR0_TS;
     __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
     __vmwrite(HOST_CR4, mmu_cr4_features);
 
@@ -1218,7 +1220,7 @@ static int construct_vmcs(struct vcpu *v)
 
     v->arch.hvm_vmx.exception_bitmap = HVM_TRAP_MASK
               | (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault))
-              | (1U << TRAP_no_device);
+              | (v->arch.fully_eager_fpu ? 0 : (1U << TRAP_no_device));
     vmx_update_exception_bitmap(v);
 
     /*
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 79fcfaeaf3..1306d35f7d 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -793,7 +793,8 @@ static void vmx_ctxt_switch_from(struct vcpu *v)
         vmx_vmcs_reload(v);
     }
 
-    vmx_fpu_leave(v);
+    if ( !v->arch.fully_eager_fpu )
+        vmx_fpu_leave(v);
     vmx_save_guest_msrs(v);
     vmx_restore_host_msrs();
     vmx_save_dr(v);
@@ -1353,7 +1354,10 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
         if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
         {
             if ( v != current )
-                hw_cr0_mask |= X86_CR0_TS;
+            {
+                if ( !v->arch.fully_eager_fpu )
+                    hw_cr0_mask |= X86_CR0_TS;
+            }
             else if ( v->arch.hvm_vcpu.hw_cr[0] & X86_CR0_TS )
                 vmx_fpu_enter(v);
         }
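
Taken together, the SVM and VMX hunks converge on one policy.  A consolidated
sketch of that end state (simplified, not the actual VMCB/VMCS construction
code; SKETCH_TRAP_MASK stands in for the common exception mask, while
TRAP_no_device and X86_CR0_TS carry their real values, vector 7 and CR0 bit 3):

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_CR0_TS        (1UL << 3)
    #define TRAP_no_device    7
    #define SKETCH_TRAP_MASK  0U            /* placeholder */

    struct sketch_vcpu {
        bool fully_eager_fpu;
    };

    /* #NM is only intercepted while FPU state is still switched lazily. */
    static uint32_t sketch_exception_bitmap(const struct sketch_vcpu *v)
    {
        return SKETCH_TRAP_MASK |
               (v->fully_eager_fpu ? 0 : (1U << TRAP_no_device));
    }

    /* Xen's own CR0: TS must stay clear in fully eager mode so that no FPU
     * access in hypervisor context can ever fault with #NM. */
    static unsigned long sketch_host_cr0(const struct sketch_vcpu *v,
                                         unsigned long cr0)
    {
        cr0 &= ~X86_CR0_TS;
        if ( !v->fully_eager_fpu )
            cr0 |= X86_CR0_TS;
        return cr0;
    }
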
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.6
