
[Xen-changelog] [xen-unstable] x86 fpu: Code clean up. Eliminate per-cpu xsave init verbosity.

# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1294996288 0
# Node ID 58304c1cc725e6ef5947aded0bee3b11f0a0dd4e
# Parent  4b7cb21caf0e114c5f6ef8a74c17ca799873e83b
x86 fpu: Code clean up. Eliminate per-cpu xsave init verbosity.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/acpi/suspend.c |    3 -
 xen/arch/x86/domain.c       |    2 
 xen/arch/x86/i387.c         |  126 ++++++++++++++++++++++++++++----------------
 xen/include/asm-x86/i387.h  |   48 ----------------
 4 files changed, 84 insertions(+), 95 deletions(-)
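
In outline: the unlazy_fpu() wrapper macro is folded into save_init_fpu()
itself, the xsave/xsaveopt/xrstor and load_mxcsr helpers move out of the
header into i387.c as static functions, and xsave_init() loses its per-CPU
boot-time printk. The calling-convention change, sketched with identifiers
taken from the patch (this fragment is illustrative, not compilable on its
own):

    /* Before: every caller guarded the save itself. */
    if ( !is_idle_vcpu(current) )
        unlazy_fpu(current);   /* if ( fpu_dirtied ) save_init_fpu(); */

    /* After: save_init_fpu() early-returns when !v->fpu_dirtied, which
     * also covers idle vcpus -- they never dirty the FPU. */
    save_init_fpu(current);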

diff -r 4b7cb21caf0e -r 58304c1cc725 xen/arch/x86/acpi/suspend.c
--- a/xen/arch/x86/acpi/suspend.c       Fri Jan 14 08:34:53 2011 +0000
+++ b/xen/arch/x86/acpi/suspend.c       Fri Jan 14 09:11:28 2011 +0000
@@ -24,8 +24,7 @@ static uint16_t saved_segs[4];
 
 void save_rest_processor_state(void)
 {
-    if ( !is_idle_vcpu(current) )
-        unlazy_fpu(current);
+    save_init_fpu(current);
 
 #if defined(CONFIG_X86_64)
     asm volatile (
diff -r 4b7cb21caf0e -r 58304c1cc725 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Jan 14 08:34:53 2011 +0000
+++ b/xen/arch/x86/domain.c     Fri Jan 14 09:11:28 2011 +0000
@@ -1384,7 +1384,7 @@ static void __context_switch(void)
         memcpy(&p->arch.guest_context.user_regs,
                stack_regs,
                CTXT_SWITCH_STACK_BYTES);
-        unlazy_fpu(p);
+        save_init_fpu(p);
         p->arch.ctxt_switch_from(p);
     }
 
diff -r 4b7cb21caf0e -r 58304c1cc725 xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c       Fri Jan 14 08:34:53 2011 +0000
+++ b/xen/arch/x86/i387.c       Fri Jan 14 09:11:28 2011 +0000
@@ -16,6 +16,49 @@
 #include <asm/i387.h>
 #include <asm/asm_defns.h>
 
+static bool_t __read_mostly cpu_has_xsaveopt;
+
+static void xsave(struct vcpu *v)
+{
+    struct xsave_struct *ptr = v->arch.xsave_area;
+
+    asm volatile (
+        ".byte " REX_PREFIX "0x0f,0xae,0x27"
+        :
+        : "a" (-1), "d" (-1), "D"(ptr)
+        : "memory" );
+}
+
+static void xsaveopt(struct vcpu *v)
+{
+    struct xsave_struct *ptr = v->arch.xsave_area;
+
+    asm volatile (
+        ".byte " REX_PREFIX "0x0f,0xae,0x37"
+        :
+        : "a" (-1), "d" (-1), "D"(ptr)
+        : "memory" );
+}
+
+static void xrstor(struct vcpu *v)
+{
+    struct xsave_struct *ptr = v->arch.xsave_area;
+
+    asm volatile (
+        ".byte " REX_PREFIX "0x0f,0xae,0x2f"
+        :
+        : "m" (*ptr), "a" (-1), "d" (-1), "D"(ptr) );
+}
+
+static void load_mxcsr(unsigned long val)
+{
+    val &= 0xffbf;
+    asm volatile ( "ldmxcsr %0" : : "m" (val) );
+}
+
+static void init_fpu(void);
+static void restore_fpu(struct vcpu *v);
+
 void setup_fpu(struct vcpu *v)
 {
     ASSERT(!is_idle_vcpu(v));
@@ -23,46 +66,51 @@ void setup_fpu(struct vcpu *v)
     /* Avoid recursion. */
     clts();
 
-    if ( !v->fpu_dirtied )
-    {
-        v->fpu_dirtied = 1;
-        if ( cpu_has_xsave )
-        {
-            if ( !v->fpu_initialised )
-                v->fpu_initialised = 1;
-
-            /* XCR0 normally represents what guest OS set. In case of Xen
-             * itself, we set all supported feature mask before doing
-             * save/restore.
-             */
-            set_xcr0(v->arch.xcr0_accum);
-            xrstor(v);
-            set_xcr0(v->arch.xcr0);
-        }
-        else
-        {
-            if ( v->fpu_initialised )
-                restore_fpu(v);
-            else
-                init_fpu();
-        }
-    }
-}
-
-void init_fpu(void)
+    if ( v->fpu_dirtied )
+        return;
+
+    if ( cpu_has_xsave )
+    {
+        /*
+         * XCR0 normally represents what guest OS set. In case of Xen itself, 
+         * we set all supported feature mask before doing save/restore.
+         */
+        set_xcr0(v->arch.xcr0_accum);
+        xrstor(v);
+        set_xcr0(v->arch.xcr0);
+    }
+    else if ( v->fpu_initialised )
+    {
+        restore_fpu(v);
+    }
+    else
+    {
+        init_fpu();
+    }
+
+    v->fpu_initialised = 1;
+    v->fpu_dirtied = 1;
+}
+
+static void init_fpu(void)
 {
     asm volatile ( "fninit" );
     if ( cpu_has_xmm )
         load_mxcsr(0x1f80);
-    current->fpu_initialised = 1;
 }
 
 void save_init_fpu(struct vcpu *v)
 {
-    unsigned long cr0 = read_cr0();
-    char *fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;
+    unsigned long cr0;
+    char *fpu_ctxt;
+
+    if ( !v->fpu_dirtied )
+        return;
 
     ASSERT(!is_idle_vcpu(v));
+
+    cr0 = read_cr0();
+    fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;
 
     /* This can happen, if a paravirtualised guest OS has set its CR0.TS. */
     if ( cr0 & X86_CR0_TS )
@@ -126,7 +174,7 @@ void save_init_fpu(struct vcpu *v)
     write_cr0(cr0|X86_CR0_TS);
 }
 
-void restore_fpu(struct vcpu *v)
+static void restore_fpu(struct vcpu *v)
 {
     char *fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;
 
@@ -188,29 +236,19 @@ u64 xfeature_mask;
 /* Cached xcr0 for fast read */
 DEFINE_PER_CPU(uint64_t, xcr0);
 
-bool_t __read_mostly cpu_has_xsaveopt;
-
 void xsave_init(void)
 {
     u32 eax, ebx, ecx, edx;
     int cpu = smp_processor_id();
     u32 min_size;
 
-    if ( boot_cpu_data.cpuid_level < XSTATE_CPUID ) {
-        printk(XENLOG_ERR "XSTATE_CPUID missing\n");
+    if ( boot_cpu_data.cpuid_level < XSTATE_CPUID )
         return;
-    }
 
     cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
 
-    printk("%s: cpu%d: cntxt_max_size: 0x%x and states: %08x:%08x\n",
-        __func__, cpu, ecx, edx, eax);
-
-    if ( ((eax & XSTATE_FP_SSE) != XSTATE_FP_SSE) ||
-         ((eax & XSTATE_YMM) && !(eax & XSTATE_SSE)) )
-    {
-        BUG();
-    }
+    BUG_ON((eax & XSTATE_FP_SSE) != XSTATE_FP_SSE);
+    BUG_ON((eax & XSTATE_YMM) && !(eax & XSTATE_SSE));
 
     /* FP/SSE, XSAVE.HEADER, YMM */
     min_size =  XSAVE_AREA_MIN_SIZE;
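
A note on the .byte sequences above: they hand-encode the instructions so
the file still builds with assemblers that lack the xsave-family mnemonics.
0x0f,0xae is the x86 "group 15" opcode; the ModRM bytes 0x27, 0x2f and 0x37
select xsave, xrstor and xsaveopt respectively, with the memory operand in
the register behind the "D"(ptr) constraint (rdi on x86-64), and REX_PREFIX
supplies the 64-bit (REX.W) form where applicable. EDX:EAX holds the
requested-feature bitmap, and -1/-1 asks for every component enabled in
XCR0. On a toolchain that knows the mnemonics, equivalent helpers can be
written directly; a minimal, self-contained sketch (xsave_area is an
illustrative stand-in for Xen's struct xsave_struct):

    #include <stdint.h>

    /* XSAVE demands a 64-byte-aligned area; 576 bytes covers the legacy
     * FXSAVE region plus the XSAVE header (real sizes come from CPUID
     * leaf 0xD). */
    struct xsave_area {
        uint8_t data[576];
    } __attribute__((aligned(64)));

    static inline void xsave_named(struct xsave_area *ptr)
    {
        /* -1 in EDX:EAX == "save everything XCR0 permits", matching
         * the patch's "a" (-1), "d" (-1) operands. */
        asm volatile ( "xsave %0" : "=m" (*ptr) : "a" (-1), "d" (-1) );
    }

    static inline void xrstor_named(const struct xsave_area *ptr)
    {
        asm volatile ( "xrstor %0" : : "m" (*ptr), "a" (-1), "d" (-1) );
    }

The new static load_mxcsr() keeps the 0xffbf mask from the removed macro:
MXCSR bit 6 is DAZ (denormals-are-zero), which the earliest SSE processors
do not implement, and ldmxcsr raises #GP if any unsupported bit is set, so
the mask keeps the write safe everywhere.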
diff -r 4b7cb21caf0e -r 58304c1cc725 xen/include/asm-x86/i387.h
--- a/xen/include/asm-x86/i387.h        Fri Jan 14 08:34:53 2011 +0000
+++ b/xen/include/asm-x86/i387.h        Fri Jan 14 09:11:28 2011 +0000
@@ -16,7 +16,6 @@
 
 extern unsigned int xsave_cntxt_size;
 extern u64 xfeature_mask;
-extern bool_t cpu_has_xsaveopt;
 
 void xsave_init(void);
 int xsave_alloc_save_area(struct vcpu *v);
@@ -75,54 +74,7 @@ static inline uint64_t get_xcr0(void)
     return this_cpu(xcr0);
 }
 
-static inline void xsave(struct vcpu *v)
-{
-    struct xsave_struct *ptr;
-
-    ptr =(struct xsave_struct *)v->arch.xsave_area;
-
-    asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x27"
-        :
-        : "a" (-1), "d" (-1), "D"(ptr)
-        : "memory");
-}
-
-static inline void xsaveopt(struct vcpu *v)
-{
-    struct xsave_struct *ptr;
-
-    ptr =(struct xsave_struct *)v->arch.xsave_area;
-
-    asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x37"
-        :
-        : "a" (-1), "d" (-1), "D"(ptr)
-        : "memory");
-}
-
-static inline void xrstor(struct vcpu *v)
-{
-    struct xsave_struct *ptr;
-
-    ptr =(struct xsave_struct *)v->arch.xsave_area;
-
-    asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x2f"
-        :
-        : "m" (*ptr), "a" (-1), "d" (-1), "D"(ptr));
-}
-
 extern void setup_fpu(struct vcpu *v);
-extern void init_fpu(void);
 extern void save_init_fpu(struct vcpu *v);
-extern void restore_fpu(struct vcpu *v);
-
-#define unlazy_fpu(v) do {                      \
-    if ( (v)->fpu_dirtied )                     \
-        save_init_fpu(v);                       \
-} while ( 0 )
-
-#define load_mxcsr(val) do {                                    \
-    unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf);    \
-    __asm__ __volatile__ ( "ldmxcsr %0" : : "m" (__mxcsr) );    \
-} while ( 0 )
 
 #endif /* __ASM_I386_I387_H */
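
For context, these helpers drive Xen's lazy FPU switching:
__context_switch() calls save_init_fpu() on the way out and leaves CR0.TS
set, so the incoming vcpu's first FPU instruction faults with #NM and the
handler calls setup_fpu() to restore state on demand. An illustrative
user-space model of that lifecycle -- not Xen code; memcpy stands in for
the hardware save/restore, and fpu_dirtied means "this task's state is
live in the registers":

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct task {
        bool fpu_dirtied;
        char fpu_ctxt[16];          /* stand-in for the save area */
    };

    static char live_fpu[16];       /* stand-in for the FPU registers */

    /* Switch-out path: a no-op unless the task dirtied the FPU -- the
     * early return this patch moves into save_init_fpu() itself. */
    static void model_save_init_fpu(struct task *t)
    {
        if ( !t->fpu_dirtied )
            return;
        memcpy(t->fpu_ctxt, live_fpu, sizeof(live_fpu));
        t->fpu_dirtied = false;     /* analogue of setting CR0.TS */
    }

    /* #NM path: first FPU use after a switch reloads the saved state. */
    static void model_setup_fpu(struct task *t)
    {
        if ( t->fpu_dirtied )
            return;                 /* already live this timeslice */
        memcpy(live_fpu, t->fpu_ctxt, sizeof(live_fpu));
        t->fpu_dirtied = true;
    }

    int main(void)
    {
        struct task a = { 0 }, b = { 0 };

        model_setup_fpu(&a);                /* a's first FPU use */
        strcpy(live_fpu, "state of a");     /* a computes */
        model_save_init_fpu(&a);            /* switch a -> b */
        model_setup_fpu(&b);                /* b's first FPU use */

        printf("saved for a: %s\n", a.fpu_ctxt);
        return 0;
    }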
