
[Xen-changelog] [xen-3.2-testing] hvm: FPU management cleanups.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1213697774 -3600
# Node ID 3d113bfab7d2ffa52fb6891b3346f0af9859979f
# Parent  7a741ec5503fa9e3c8c6c99726aa79c679d9dc56
hvm: FPU management cleanups.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset:   16971:e4edc310e949750065cb39588d87c335c7cd71a2
xen-unstable date:        Sun Feb 03 10:22:08 2008 +0000
---
 xen/arch/x86/hvm/hvm.c        |    3 -
 xen/arch/x86/hvm/svm/svm.c    |   88 ++++++++++++++++++++----------------
 xen/arch/x86/hvm/svm/vmcb.c   |    7 +-
 xen/arch/x86/hvm/vmx/vmcs.c   |    4 +
 xen/arch/x86/hvm/vmx/vmx.c    |  101 ++++++++++++++++++++----------------------
 xen/include/asm-x86/hvm/hvm.h |    6 --
 6 files changed, 105 insertions(+), 104 deletions(-)

diff -r 7a741ec5503f -r 3d113bfab7d2 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Jun 17 11:11:21 2008 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Tue Jun 17 11:16:14 2008 +0100
@@ -123,9 +123,6 @@ void hvm_do_resume(struct vcpu *v)
 void hvm_do_resume(struct vcpu *v)
 {
     ioreq_t *p;
-
-    if ( !v->fpu_dirtied )
-        hvm_funcs.stts(v);
 
     pt_restore_timer(v);
 
diff -r 7a741ec5503f -r 3d113bfab7d2 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue Jun 17 11:11:21 2008 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Jun 17 11:16:14 2008 +0100
@@ -429,6 +429,34 @@ static int svm_load_vmcb_ctxt(struct vcp
     return 0;
 }
 
+static void svm_fpu_enter(struct vcpu *v)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    setup_fpu(v);
+    vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
+}
+
+static void svm_fpu_leave(struct vcpu *v)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    ASSERT(!v->fpu_dirtied);
+    ASSERT(read_cr0() & X86_CR0_TS);
+
+    /*
+     * If the guest does not have TS enabled then we must cause and handle an 
+     * exception on first use of the FPU. If the guest *does* have TS enabled 
+     * then this is not necessary: no FPU activity can occur until the guest 
+     * clears CR0.TS, and we will initialise the FPU when that happens.
+     */
+    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+    {
+        v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
+        vmcb->cr0 |= X86_CR0_TS;
+    }
+}
+
 static enum hvm_intblk svm_interrupt_blocked(
     struct vcpu *v, struct hvm_intack intack)
 {
@@ -474,19 +502,22 @@ static void svm_update_guest_cr(struct v
 
     switch ( cr )
     {
-    case 0:
-        /* TS cleared? Then initialise FPU now. */
-        if ( (v == current) && !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) &&
-             (vmcb->cr0 & X86_CR0_TS) )
+    case 0: {
+        unsigned long hw_cr0_mask = 0;
+
+        if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
         {
-            setup_fpu(v);
-            vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
-        }
-
-        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+            if ( v != current )
+                hw_cr0_mask |= X86_CR0_TS;
+            else if ( vmcb->cr0 & X86_CR0_TS )
+                svm_fpu_enter(v);
+        }
+
+        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
         if ( !paging_mode_hap(v->domain) )
             vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
         break;
+    }
     case 2:
         vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2];
         break;
@@ -669,24 +700,6 @@ static void svm_set_segment_register(str
         svm_vmload(vmcb);
 }
 
-/* Make sure that xen intercepts any FP accesses from current */
-static void svm_stts(struct vcpu *v) 
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    /*
-     * If the guest does not have TS enabled then we must cause and handle an 
-     * exception on first use of the FPU. If the guest *does* have TS enabled 
-     * then this is not necessary: no FPU activity can occur until the guest 
-     * clears CR0.TS, and we will initialise the FPU when that happens.
-     */
-    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
-    {
-        v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
-        vmcb->cr0 |= X86_CR0_TS;
-    }
-}
-
 static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     v->arch.hvm_svm.vmcb->tsc_offset = offset;
@@ -715,6 +728,8 @@ static void svm_ctxt_switch_from(struct 
 static void svm_ctxt_switch_from(struct vcpu *v)
 {
     int cpu = smp_processor_id();
+
+    svm_fpu_leave(v);
 
     svm_save_dr(v);
 
@@ -883,7 +898,6 @@ static struct hvm_function_table svm_fun
     .update_guest_cr      = svm_update_guest_cr,
     .update_guest_efer    = svm_update_guest_efer,
     .flush_guest_tlbs     = svm_flush_guest_tlbs,
-    .stts                 = svm_stts,
     .set_tsc_offset       = svm_set_tsc_offset,
     .inject_exception     = svm_inject_exception,
     .init_hypercall_page  = svm_init_hypercall_page,
@@ -963,12 +977,11 @@ static void svm_do_nested_pgfault(paddr_
 
 static void svm_do_no_device_fault(struct vmcb_struct *vmcb)
 {
-    struct vcpu *v = current;
-
-    setup_fpu(v);    
-    vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
-
-    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+    struct vcpu *curr = current;
+
+    svm_fpu_enter(curr);
+
+    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
         vmcb->cr0 &= ~X86_CR0_TS;
 }
 
@@ -1642,11 +1655,8 @@ static void svm_cr_access(
         break;
 
     case INSTR_CLTS:
-        /* TS being cleared means that it's time to restore fpu state. */
-        setup_fpu(current);
-        vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
-        vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
-        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
+        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
+        svm_update_guest_cr(v, 0);
         HVMTRACE_0D(CLTS, current);
         break;
 
diff -r 7a741ec5503f -r 3d113bfab7d2 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Tue Jun 17 11:11:21 2008 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Tue Jun 17 11:16:14 2008 +0100
@@ -212,20 +212,21 @@ static int construct_vmcb(struct vcpu *v
     vmcb->tr.base = 0;
     vmcb->tr.limit = 0xff;
 
-    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_TS;
+    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
     hvm_update_guest_cr(v, 0);
 
     v->arch.hvm_vcpu.guest_cr[4] = 0;
     hvm_update_guest_cr(v, 4);
 
     paging_update_paging_modes(v);
+
+    vmcb->exception_intercepts = HVM_TRAP_MASK | (1U << TRAP_no_device);
 
     if ( paging_mode_hap(v->domain) )
     {
         vmcb->np_enable = 1; /* enable nested paging */
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
         vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
-        vmcb->exception_intercepts = HVM_TRAP_MASK;
 
         /*
          * No point in intercepting CR3 reads, because the hardware will return
@@ -241,7 +242,7 @@ static int construct_vmcb(struct vcpu *v
     }
     else
     {
-        vmcb->exception_intercepts = HVM_TRAP_MASK | (1U << TRAP_page_fault);
+        vmcb->exception_intercepts |= (1U << TRAP_page_fault);
     }
 
     return 0;
diff -r 7a741ec5503f -r 3d113bfab7d2 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Tue Jun 17 11:11:21 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Tue Jun 17 11:16:14 2008 +0100
@@ -591,7 +591,9 @@ static int construct_vmcs(struct vcpu *v
     __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
 #endif
 
-    __vmwrite(EXCEPTION_BITMAP, HVM_TRAP_MASK | (1U << TRAP_page_fault));
+    __vmwrite(EXCEPTION_BITMAP, (HVM_TRAP_MASK |
+                                 (1U << TRAP_page_fault) |
+                                 (1U << TRAP_no_device)));
 
     v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
     hvm_update_guest_cr(v, 0);
diff -r 7a741ec5503f -r 3d113bfab7d2 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue Jun 17 11:11:21 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Jun 17 11:16:14 2008 +0100
@@ -736,15 +736,42 @@ static int vmx_load_vmcs_ctxt(struct vcp
     return 0;
 }
 
-static void vmx_ctxt_switch_from(struct vcpu *v)
-{
+static void vmx_fpu_enter(struct vcpu *v)
+{
+    setup_fpu(v);
+    __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
+    v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS;
+    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+}
+
+static void vmx_fpu_leave(struct vcpu *v)
+{
+    ASSERT(!v->fpu_dirtied);
     ASSERT(read_cr0() & X86_CR0_TS);
+
     if ( !(v->arch.hvm_vmx.host_cr0 & X86_CR0_TS) )
     {
         v->arch.hvm_vmx.host_cr0 |= X86_CR0_TS;
         __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
     }
 
+    /*
+     * If the guest does not have TS enabled then we must cause and handle an
+     * exception on first use of the FPU. If the guest *does* have TS enabled
+     * then this is not necessary: no FPU activity can occur until the guest
+     * clears CR0.TS, and we will initialise the FPU when that happens.
+     */
+    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+    {
+        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
+        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+        __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
+    }
+}
+
+static void vmx_ctxt_switch_from(struct vcpu *v)
+{
+    vmx_fpu_leave(v);
     vmx_save_guest_msrs(v);
     vmx_restore_host_msrs();
     vmx_save_dr(v);
@@ -945,26 +972,6 @@ static void vmx_set_segment_register(str
     vmx_vmcs_exit(v);
 }
 
-/* Make sure that xen intercepts any FP accesses from current */
-static void vmx_stts(struct vcpu *v)
-{
-    /* VMX depends on operating on the current vcpu */
-    ASSERT(v == current);
-
-    /*
-     * If the guest does not have TS enabled then we must cause and handle an
-     * exception on first use of the FPU. If the guest *does* have TS enabled
-     * then this is not necessary: no FPU activity can occur until the guest
-     * clears CR0.TS, and we will initialise the FPU when that happens.
-     */
-    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
-    {
-        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
-        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
-        __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
-    }
-}
-
 static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     vmx_vmcs_enter(v);
@@ -1041,13 +1048,16 @@ static void vmx_update_guest_cr(struct v
 
     switch ( cr )
     {
-    case 0:
-        /* TS cleared? Then initialise FPU now. */
-        if ( (v == current) && !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) &&
-             (v->arch.hvm_vcpu.hw_cr[0] & X86_CR0_TS) )
+    case 0: {
+        unsigned long hw_cr0_mask =
+            X86_CR0_NE | X86_CR0_PG | X86_CR0_WP | X86_CR0_PE;
+
+        if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
         {
-            setup_fpu(v);
-            __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
+            if ( v != current )
+                hw_cr0_mask |= X86_CR0_TS;
+            else if ( v->arch.hvm_vcpu.hw_cr[0] & X86_CR0_TS )
+                vmx_fpu_enter(v);
         }
 
 #ifndef VMXASSIST
@@ -1057,11 +1067,11 @@ static void vmx_update_guest_cr(struct v
 #endif
 
         v->arch.hvm_vcpu.hw_cr[0] =
-            v->arch.hvm_vcpu.guest_cr[0] |
-            X86_CR0_NE | X86_CR0_PG | X86_CR0_WP | X86_CR0_PE;
+            v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
         __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
         __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
         break;
+    }
     case 2:
         /* CR2 is updated in exit stub. */
         break;
@@ -1153,7 +1163,6 @@ static struct hvm_function_table vmx_fun
     .update_guest_cr      = vmx_update_guest_cr,
     .update_guest_efer    = vmx_update_guest_efer,
     .flush_guest_tlbs     = vmx_flush_guest_tlbs,
-    .stts                 = vmx_stts,
     .set_tsc_offset       = vmx_set_tsc_offset,
     .inject_exception     = vmx_inject_exception,
     .init_hypercall_page  = vmx_init_hypercall_page,
@@ -1233,20 +1242,15 @@ static void __update_guest_eip(unsigned 
 
 void vmx_do_no_device_fault(void)
 {
-    struct vcpu *v = current;
-
-    setup_fpu(current);
-    __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
-
-    ASSERT(v->arch.hvm_vmx.host_cr0 & X86_CR0_TS);
-    v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS;
-    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
+    struct vcpu *curr = current;
+
+    vmx_fpu_enter(curr);
 
     /* Disable TS in guest CR0 unless the guest wants the exception too. */
-    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
-    {
-        v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
-        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+    {
+        curr->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
+        __vmwrite(GUEST_CR0, curr->arch.hvm_vcpu.hw_cr[0]);
     }
 }
 
@@ -2215,15 +2219,8 @@ static int vmx_cr_access(unsigned long e
         mov_from_cr(cr, gp, regs);
         break;
     case TYPE_CLTS:
-        /* We initialise the FPU now, to avoid needing another vmexit. */
-        setup_fpu(v);
-        __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
-
-        v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS; /* clear TS */
-        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
-
-        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
-        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
+        vmx_update_guest_cr(v, 0);
         HVMTRACE_0D(CLTS, current);
         break;
     case TYPE_LMSW:
diff -r 7a741ec5503f -r 3d113bfab7d2 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Tue Jun 17 11:11:21 2008 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Tue Jun 17 11:16:14 2008 +0100
@@ -105,12 +105,6 @@ struct hvm_function_table {
      */
     void (*flush_guest_tlbs)(void);
 
-    /*
-     * Update specifics of the guest state:
-     * 1) TS bit in guest cr0 
-     * 2) TSC offset in guest
-     */
-    void (*stts)(struct vcpu *v);
     void (*set_tsc_offset)(struct vcpu *v, u64 offset);
 
     void (*inject_exception)(unsigned int trapnr, int errcode,
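
For readers skimming the diff, below is a minimal, self-contained model of the lazy-FPU pattern this changeset consolidates into svm_fpu_enter/svm_fpu_leave and vmx_fpu_enter/vmx_fpu_leave. It is not Xen code: the struct vcpu, the fpu_enter/fpu_leave/do_no_device_fault/update_guest_cr0 helpers and the CR0_TS/TRAP_NO_DEV constants are hypothetical stand-ins for the VMCB/VMCS fields, setup_fpu(), X86_CR0_TS and TRAP_no_device used in the patch, and real CR0/host-state writes are modelled as plain integer updates so the control flow can be compiled and stepped through on its own.

/*
 * Simplified model of lazy guest-FPU management via CR0.TS and the
 * device-not-available (#NM) intercept.  Compile with: cc -std=c99 lazy_fpu_model.c
 */
#include <stdbool.h>
#include <stdio.h>

#define CR0_TS       (1u << 3)    /* task-switched: FPU use raises #NM     */
#define TRAP_NO_DEV  7            /* #NM, device-not-available vector      */

struct vcpu {
    unsigned int guest_cr0;       /* CR0 value the guest believes it has   */
    unsigned int hw_cr0;          /* CR0 value actually in effect for it   */
    unsigned int exn_intercepts;  /* bitmap of intercepted exceptions      */
    bool         fpu_dirtied;     /* guest FPU state loaded on this CPU?   */
};

/* Guest touched the FPU: load its state and stop intercepting #NM. */
static void fpu_enter(struct vcpu *v)
{
    v->fpu_dirtied = true;                      /* setup_fpu() in the patch */
    v->exn_intercepts &= ~(1u << TRAP_NO_DEV);
}

/*
 * Descheduling the vcpu: its FPU state is unloaded, so the next FPU use
 * must fault.  If the guest runs with TS clear we force TS in the
 * *hardware* CR0 and re-arm the #NM intercept; if the guest already has
 * TS set, its own #NM handling covers first use and nothing more is needed.
 */
static void fpu_leave(struct vcpu *v)
{
    v->fpu_dirtied = false;
    if ( !(v->guest_cr0 & CR0_TS) )
    {
        v->hw_cr0 |= CR0_TS;
        v->exn_intercepts |= 1u << TRAP_NO_DEV;
    }
}

/* Intercepted #NM: restore the FPU, then hide TS again unless the guest wants it. */
static void do_no_device_fault(struct vcpu *v)
{
    fpu_enter(v);
    if ( !(v->guest_cr0 & CR0_TS) )
        v->hw_cr0 &= ~CR0_TS;
}

/* CR0 updates (including CLTS emulation) funnel through one place. */
static void update_guest_cr0(struct vcpu *v, unsigned int val, bool is_current)
{
    unsigned int hw_mask = 0;

    v->guest_cr0 = val;
    if ( !(val & CR0_TS) )
    {
        if ( !is_current )
            hw_mask |= CR0_TS;          /* keep faulting until it actually runs */
        else if ( v->hw_cr0 & CR0_TS )
            fpu_enter(v);               /* TS just cleared: load the FPU now    */
    }
    v->hw_cr0 = val | hw_mask;
}

int main(void)
{
    struct vcpu v = { .guest_cr0 = CR0_TS, .hw_cr0 = CR0_TS,
                      .exn_intercepts = 1u << TRAP_NO_DEV };

    update_guest_cr0(&v, 0, true);      /* guest executes CLTS                  */
    printf("after CLTS:  hw TS=%d dirtied=%d\n",
           !!(v.hw_cr0 & CR0_TS), (int)v.fpu_dirtied);

    fpu_leave(&v);                      /* vcpu descheduled                     */
    printf("after leave: hw TS=%d #NM intercepted=%d\n",
           !!(v.hw_cr0 & CR0_TS), !!(v.exn_intercepts & (1u << TRAP_NO_DEV)));

    do_no_device_fault(&v);             /* guest touches FPU again -> #NM       */
    printf("after #NM:   hw TS=%d dirtied=%d\n",
           !!(v.hw_cr0 & CR0_TS), (int)v.fpu_dirtied);
    return 0;
}

The VMX variant in the patch additionally toggles TS in the host CR0 image kept in the VMCS (host_cr0 / HOST_CR0), so that Xen itself faults on FPU use once the guest's state has been unloaded; the model above omits that bookkeeping. The net effect of the cleanup is that CLTS emulation and CR0 writes both go through update_guest_cr, the #NM handlers shrink to a call of the new fpu_enter helper, and the per-vendor stts hook in hvm_function_table is no longer needed.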
