
[Xen-changelog] [xen-unstable] x86/fpu: clean up FPU context save function



# HG changeset patch
# User Wei Huang <wei.huang2@xxxxxxx>
# Date 1304937484 -3600
# Node ID 389b0ce8b11a82cbff6cd8dcb01ed74dd9caa60c
# Parent  260c3f760eff2d594b55022cf230097147e5148c
x86/fpu: clean up FPU context save function

This patch cleans up the FPU context save function. It renames the save
function to vcpu_save_fpu(), because the existing name is confusing. It
also extracts the FPU context save code (fsave, fxsave, xsave) into
separate functions. vcpu_save_fpu() calls the corresponding sub-function
depending on the CPU's capability.

Signed-off-by: Wei Huang <wei.huang2@xxxxxxx>
---
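
For reference, here is a condensed sketch of the resulting save path. It is
not part of the patch itself; every identifier in it (vcpu_save_fpu,
fpu_xsave, fpu_fxsave, fpu_fsave, xsave_enabled, cpu_has_fxsr, clts, stts,
fpu_dirtied) is taken from the diff below:

    /* Condensed sketch of the new save path in xen/arch/x86/i387.c. */
    void vcpu_save_fpu(struct vcpu *v)
    {
        if ( !v->fpu_dirtied )      /* nothing to save if the FPU is clean */
            return;

        clts();                     /* clear CR0.TS so FPU insns don't fault */

        if ( xsave_enabled(v) )     /* XSAVE: extended state per XCR0 */
            fpu_xsave(v);
        else if ( cpu_has_fxsr )    /* FXSAVE: x87, MMX, SSE and SSE2 state */
            fpu_fxsave(v);
        else                        /* FNSAVE: legacy x87-only state */
            fpu_fsave(v);

        v->fpu_dirtied = 0;
        stts();                     /* set CR0.TS again to trap next FPU use */
    }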


diff -r 260c3f760eff -r 389b0ce8b11a xen/arch/x86/acpi/suspend.c
--- a/xen/arch/x86/acpi/suspend.c       Mon May 09 11:37:36 2011 +0100
+++ b/xen/arch/x86/acpi/suspend.c       Mon May 09 11:38:04 2011 +0100
@@ -24,7 +24,7 @@
 
 void save_rest_processor_state(void)
 {
-    save_init_fpu(current);
+    vcpu_save_fpu(current);
 
 #if defined(CONFIG_X86_64)
     asm volatile (
diff -r 260c3f760eff -r 389b0ce8b11a xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Mon May 09 11:37:36 2011 +0100
+++ b/xen/arch/x86/domain.c     Mon May 09 11:38:04 2011 +0100
@@ -1548,7 +1548,7 @@
     if ( !is_idle_vcpu(p) )
     {
         memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
-        save_init_fpu(p);
+        vcpu_save_fpu(p);
         p->arch.ctxt_switch_from(p);
     }
 
diff -r 260c3f760eff -r 389b0ce8b11a xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c       Mon May 09 11:37:36 2011 +0100
+++ b/xen/arch/x86/i387.c       Mon May 09 11:38:04 2011 +0100
@@ -66,78 +66,6 @@
         load_mxcsr(0x1f80);
 }
 
-void save_init_fpu(struct vcpu *v)
-{
-    unsigned long cr0;
-    char *fpu_ctxt;
-
-    if ( !v->fpu_dirtied )
-        return;
-
-    ASSERT(!is_idle_vcpu(v));
-
-    cr0 = read_cr0();
-    fpu_ctxt = v->arch.fpu_ctxt;
-
-    /* This can happen, if a paravirtualised guest OS has set its CR0.TS. */
-    if ( cr0 & X86_CR0_TS )
-        clts();
-
-    if ( xsave_enabled(v) )
-    {
-        /* XCR0 normally represents what guest OS set. In case of Xen itself,
-         * we set all accumulated feature mask before doing save/restore.
-         */
-        set_xcr0(v->arch.xcr0_accum);
-        xsave(v);
-        set_xcr0(v->arch.xcr0);
-    }
-    else if ( cpu_has_fxsr )
-    {
-#ifdef __i386__
-        asm volatile (
-            "fxsave %0"
-            : "=m" (*fpu_ctxt) );
-#else /* __x86_64__ */
-        /*
-         * The only way to force fxsaveq on a wide range of gas versions. On 
-         * older versions the rex64 prefix works only if we force an
-         * addressing mode that doesn't require extended registers.
-         */
-        asm volatile (
-            REX64_PREFIX "fxsave (%1)"
-            : "=m" (*fpu_ctxt) : "cdaSDb" (fpu_ctxt) );
-#endif
-
-        /* Clear exception flags if FSW.ES is set. */
-        if ( unlikely(fpu_ctxt[2] & 0x80) )
-            asm volatile ("fnclex");
-
-        /*
-         * AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
-         * is pending. Clear the x87 state here by setting it to fixed
-         * values. The hypervisor data segment can be sometimes 0 and
-         * sometimes new user value. Both should be ok. Use the FPU saved
-         * data block as a safe address because it should be in L1.
-         */
-        if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
-        {
-            asm volatile (
-                "emms\n\t"  /* clear stack tags */
-                "fildl %0"  /* load to clear state */
-                : : "m" (*fpu_ctxt) );
-        }
-    }
-    else
-    {
-        /* FWAIT is required to make FNSAVE synchronous. */
-        asm volatile ( "fnsave %0 ; fwait" : "=m" (*fpu_ctxt) );
-    }
-
-    v->fpu_dirtied = 0;
-    write_cr0(cr0|X86_CR0_TS);
-}
-
 static void restore_fpu(struct vcpu *v)
 {
     const char *fpu_ctxt = v->arch.fpu_ctxt;
@@ -185,8 +113,96 @@
 }
 
 /*******************************/
+/*      FPU Save Functions     */
+/*******************************/
+/* Save x87 extended state */
+static inline void fpu_xsave(struct vcpu *v)
+{
+    /* XCR0 normally represents what guest OS set. In case of Xen itself,
+     * we set all accumulated feature mask before doing save/restore.
+     */
+    set_xcr0(v->arch.xcr0_accum);
+    xsave(v);
+    set_xcr0(v->arch.xcr0);    
+}
+
+/* Save x87 FPU, MMX, SSE and SSE2 state */
+static inline void fpu_fxsave(struct vcpu *v)
+{
+    char *fpu_ctxt = v->arch.fpu_ctxt;
+
+#ifdef __i386__
+    asm volatile (
+        "fxsave %0"
+        : "=m" (*fpu_ctxt) );
+#else /* __x86_64__ */
+    /*
+     * The only way to force fxsaveq on a wide range of gas versions. On 
+     * older versions the rex64 prefix works only if we force an
+     * addressing mode that doesn't require extended registers.
+     */
+    asm volatile (
+        REX64_PREFIX "fxsave (%1)"
+        : "=m" (*fpu_ctxt) : "cdaSDb" (fpu_ctxt) );
+#endif
+    
+    /* Clear exception flags if FSW.ES is set. */
+    if ( unlikely(fpu_ctxt[2] & 0x80) )
+        asm volatile ("fnclex");
+    
+    /*
+     * AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
+     * is pending. Clear the x87 state here by setting it to fixed
+     * values. The hypervisor data segment can be sometimes 0 and
+     * sometimes new user value. Both should be ok. Use the FPU saved
+     * data block as a safe address because it should be in L1.
+     */
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+    {
+        asm volatile (
+            "emms\n\t"  /* clear stack tags */
+            "fildl %0"  /* load to clear state */
+            : : "m" (*fpu_ctxt) );
+    }
+}
+
+/* Save x87 FPU state */
+static inline void fpu_fsave(struct vcpu *v)
+{
+    char *fpu_ctxt = v->arch.fpu_ctxt;
+
+    /* FWAIT is required to make FNSAVE synchronous. */
+    asm volatile ( "fnsave %0 ; fwait" : "=m" (*fpu_ctxt) );
+}
+
+/*******************************/
 /*       VCPU FPU Functions    */
 /*******************************/
+/*
+ * On each context switch, save the necessary FPU state of the VCPU being
+ * switched out. Dispatch the save operation based on the CPU's capability.
+ */
+void vcpu_save_fpu(struct vcpu *v)
+{
+    if ( !v->fpu_dirtied )
+        return;
+
+    ASSERT(!is_idle_vcpu(v));
+
+    /* This can happen, if a paravirtualised guest OS has set its CR0.TS. */
+    clts();
+
+    if ( xsave_enabled(v) )
+        fpu_xsave(v);
+    else if ( cpu_has_fxsr )
+        fpu_fxsave(v);
+    else
+        fpu_fsave(v);
+
+    v->fpu_dirtied = 0;
+    stts();
+}
+
 /* Initialize FPU's context save area */
 int vcpu_init_fpu(struct vcpu *v)
 {
diff -r 260c3f760eff -r 389b0ce8b11a xen/include/asm-x86/i387.h
--- a/xen/include/asm-x86/i387.h        Mon May 09 11:37:36 2011 +0100
+++ b/xen/include/asm-x86/i387.h        Mon May 09 11:38:04 2011 +0100
@@ -15,7 +15,7 @@
 #include <xen/percpu.h>
 
 void setup_fpu(struct vcpu *v);
-void save_init_fpu(struct vcpu *v);
+void vcpu_save_fpu(struct vcpu *v);
 
 int vcpu_init_fpu(struct vcpu *v);
 void vcpu_destroy_fpu(struct vcpu *v);
