
[Xen-changelog] [xen-unstable] x86/fpu: clean up FPU context restore function



# HG changeset patch
# User Wei Huang <wei.huang2@xxxxxxx>
# Date 1304937510 -3600
# Node ID 4288a8b9e88ca3050ee853bb9b38b7f56ef1c62f
# Parent  389b0ce8b11a82cbff6cd8dcb01ed74dd9caa60c
x86/fpu: clean up FPU context restore function

This patch cleans up the FPU context restore function. It renames the
function to vcpu_restore_fpu() and extracts the FPU restore code (frstor,
fxrstor, xrstor) into separate functions. vcpu_restore_fpu() dispatches to
these functions depending on the CPU's capability.

Signed-off-by: Wei Huang <wei.huang2@xxxxxxx>
---
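For reference, the restore path after this change dispatches as sketched
below; this is a condensed summary of the vcpu_restore_fpu() hunk in
xen/arch/x86/i387.c, not part of the applied patch. MXCSR_DEFAULT (0x1f80)
is the architectural reset value of MXCSR: all SSE exception masks set,
round-to-nearest.

    /* Invoked on #NM (device-not-available), i.e. on the first FPU use
     * after a context switch or the vCPU's first-ever FPU use. */
    void vcpu_restore_fpu(struct vcpu *v)
    {
        clts();                     /* clear CR0.TS; avoids #NM recursion */

        if ( v->fpu_dirtied )
            return;                 /* state already live on this CPU */

        if ( xsave_enabled(v) )
            fpu_xrstor(v);          /* xrstor: full extended state */
        else if ( v->fpu_initialised )
        {
            if ( cpu_has_fxsr )
                fpu_fxrstor(v);     /* fxrstor: x87/MMX/SSE/SSE2 */
            else
                fpu_frstor(v);      /* frstor: legacy x87 only */
        }
        else
            fpu_init();             /* first use: fninit + default MXCSR */

        v->fpu_initialised = 1;
        v->fpu_dirtied = 1;
    }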


diff -r 389b0ce8b11a -r 4288a8b9e88c xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Mon May 09 11:38:04 2011 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Mon May 09 11:38:30 2011 +0100
@@ -348,7 +348,7 @@
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    setup_fpu(v);
+    vcpu_restore_fpu(v);
     vmcb_set_exception_intercepts(
         vmcb, vmcb_get_exception_intercepts(vmcb) & ~(1U << TRAP_no_device));
 }
diff -r 389b0ce8b11a -r 4288a8b9e88c xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon May 09 11:38:04 2011 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Mon May 09 11:38:30 2011 +0100
@@ -612,7 +612,7 @@
 
 static void vmx_fpu_enter(struct vcpu *v)
 {
-    setup_fpu(v);
+    vcpu_restore_fpu(v);
     v->arch.hvm_vmx.exception_bitmap &= ~(1u << TRAP_no_device);
     vmx_update_exception_bitmap(v);
     v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS;
diff -r 389b0ce8b11a -r 4288a8b9e88c xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c       Mon May 09 11:38:04 2011 +0100
+++ b/xen/arch/x86/i387.c       Mon May 09 11:38:30 2011 +0100
@@ -17,56 +17,37 @@
 #include <asm/xstate.h>
 #include <asm/asm_defns.h>
 
-static void load_mxcsr(unsigned long val)
+#define MXCSR_DEFAULT 0x1f80
+static void fpu_init(void)
 {
-    val &= 0xffbf;
-    asm volatile ( "ldmxcsr %0" : : "m" (val) );
+    unsigned long val;
+
+    asm volatile ( "fninit" );
+    if ( cpu_has_xmm )
+    {
+        /* load default value into MXCSR control/status register */
+        val = MXCSR_DEFAULT;
+        asm volatile ( "ldmxcsr %0" : : "m" (val) );
+    }
 }
 
-static void init_fpu(void);
-static void restore_fpu(struct vcpu *v);
-
-void setup_fpu(struct vcpu *v)
+/*******************************/
+/*     FPU Restore Functions   */
+/*******************************/
+/* Restore x87 extended state */
+static inline void fpu_xrstor(struct vcpu *v)
 {
-    ASSERT(!is_idle_vcpu(v));
-
-    /* Avoid recursion. */
-    clts();
-
-    if ( v->fpu_dirtied )
-        return;
-
-    if ( xsave_enabled(v) )
-    {
-        /*
-         * XCR0 normally represents what guest OS set. In case of Xen itself, 
-         * we set all supported feature mask before doing save/restore.
-         */
-        set_xcr0(v->arch.xcr0_accum);
-        xrstor(v);
-        set_xcr0(v->arch.xcr0);
-    }
-    else if ( v->fpu_initialised )
-    {
-        restore_fpu(v);
-    }
-    else
-    {
-        init_fpu();
-    }
-
-    v->fpu_initialised = 1;
-    v->fpu_dirtied = 1;
+    /*
+     * XCR0 normally represents what the guest OS set. For Xen itself, we
+     * set the mask of all supported features before doing a save/restore.
+     */
+    set_xcr0(v->arch.xcr0_accum);
+    xrstor(v);
+    set_xcr0(v->arch.xcr0);
 }
 
-static void init_fpu(void)
-{
-    asm volatile ( "fninit" );
-    if ( cpu_has_xmm )
-        load_mxcsr(0x1f80);
-}
-
-static void restore_fpu(struct vcpu *v)
+/* Restore x87 FPU, MMX, SSE and SSE2 state */
+static inline void fpu_fxrstor(struct vcpu *v)
 {
     const char *fpu_ctxt = v->arch.fpu_ctxt;
 
@@ -75,41 +56,42 @@
      * possibility, which may occur if the block was passed to us by control
      * tools, by silently clearing the block.
      */
-    if ( cpu_has_fxsr )
-    {
-        asm volatile (
+    asm volatile (
 #ifdef __i386__
-            "1: fxrstor %0            \n"
+        "1: fxrstor %0            \n"
 #else /* __x86_64__ */
-            /* See above for why the operands/constraints are this way. */
-            "1: " REX64_PREFIX "fxrstor (%2)\n"
+        /* See above for why the operands/constraints are this way. */
+        "1: " REX64_PREFIX "fxrstor (%2)\n"
 #endif
-            ".section .fixup,\"ax\"   \n"
-            "2: push %%"__OP"ax       \n"
-            "   push %%"__OP"cx       \n"
-            "   push %%"__OP"di       \n"
-            "   lea  %0,%%"__OP"di    \n"
-            "   mov  %1,%%ecx         \n"
-            "   xor  %%eax,%%eax      \n"
-            "   rep ; stosl           \n"
-            "   pop  %%"__OP"di       \n"
-            "   pop  %%"__OP"cx       \n"
-            "   pop  %%"__OP"ax       \n"
-            "   jmp  1b               \n"
-            ".previous                \n"
-            _ASM_EXTABLE(1b, 2b)
-            : 
-            : "m" (*fpu_ctxt),
-              "i" (sizeof(v->arch.xsave_area->fpu_sse)/4)
+        ".section .fixup,\"ax\"   \n"
+        "2: push %%"__OP"ax       \n"
+        "   push %%"__OP"cx       \n"
+        "   push %%"__OP"di       \n"
+        "   lea  %0,%%"__OP"di    \n"
+        "   mov  %1,%%ecx         \n"
+        "   xor  %%eax,%%eax      \n"
+        "   rep ; stosl           \n"
+        "   pop  %%"__OP"di       \n"
+        "   pop  %%"__OP"cx       \n"
+        "   pop  %%"__OP"ax       \n"
+        "   jmp  1b               \n"
+        ".previous                \n"
+        _ASM_EXTABLE(1b, 2b)
+        :
+        : "m" (*fpu_ctxt),
+          "i" (sizeof(v->arch.xsave_area->fpu_sse)/4)
 #ifdef __x86_64__
-             ,"cdaSDb" (fpu_ctxt)
+          ,"cdaSDb" (fpu_ctxt)
 #endif
-            );
-    }
-    else
-    {
-        asm volatile ( "frstor %0" : : "m" (*fpu_ctxt) );
-    }
+        );
+}
+
+/* Restore x87 FPU state */
+static inline void fpu_frstor(struct vcpu *v)
+{
+    const char *fpu_ctxt = v->arch.fpu_ctxt;
+
+    asm volatile ( "frstor %0" : : "m" (*fpu_ctxt) );
 }
 
 /*******************************/
@@ -179,6 +161,35 @@
 /*       VCPU FPU Functions    */
 /*******************************/
 /* 
+ * Restore FPU state when #NM is triggered.
+ */
+void vcpu_restore_fpu(struct vcpu *v)
+{
+    ASSERT(!is_idle_vcpu(v));
+
+    /* Avoid recursion. */
+    clts();
+
+    if ( v->fpu_dirtied )
+        return;
+
+    if ( xsave_enabled(v) )
+        fpu_xrstor(v);
+    else if ( v->fpu_initialised )
+    {
+        if ( cpu_has_fxsr )
+            fpu_fxrstor(v);
+        else
+            fpu_frstor(v);
+    }
+    else
+        fpu_init();
+
+    v->fpu_initialised = 1;
+    v->fpu_dirtied = 1;
+}
+
+/* 
 * On each context switch, save the necessary FPU state of the VCPU being
 * switched out. It dispatches saving based on the CPU's capability.
  */
diff -r 389b0ce8b11a -r 4288a8b9e88c xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Mon May 09 11:38:04 2011 +0100
+++ b/xen/arch/x86/traps.c      Mon May 09 11:38:30 2011 +0100
@@ -3198,7 +3198,7 @@
 
     BUG_ON(!guest_mode(regs));
 
-    setup_fpu(curr);
+    vcpu_restore_fpu(curr);
 
     if ( curr->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS )
     {
diff -r 389b0ce8b11a -r 4288a8b9e88c xen/include/asm-x86/i387.h
--- a/xen/include/asm-x86/i387.h        Mon May 09 11:38:04 2011 +0100
+++ b/xen/include/asm-x86/i387.h        Mon May 09 11:38:30 2011 +0100
@@ -14,7 +14,7 @@
 #include <xen/types.h>
 #include <xen/percpu.h>
 
-void setup_fpu(struct vcpu *v);
+void vcpu_restore_fpu(struct vcpu *v);
 void vcpu_save_fpu(struct vcpu *v);
 
 int vcpu_init_fpu(struct vcpu *v);
