
[Xen-changelog] [xen-unstable] x86/fpu: add mask parameter to xsave and xrstor



# HG changeset patch
# User Wei Huang <wei.huang2@xxxxxxx>
# Date 1304937535 -3600
# Node ID 631bdf00a81f2099233ca3146c8c7383511aba0f
# Parent  4288a8b9e88ca3050ee853bb9b38b7f56ef1c62f
x86/fpu: add mask parameter to xsave and xrstor

Xen currently hard-codes the mask bits of xsave() and xrstor() to all 1's. This
patch adds a mask parameter to xsave() and xrstor() so that callers can select
which extended state components are saved or restored.

Signed-off-by: Wei Huang <wei.huang2@xxxxxxx>
---
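
A minimal usage sketch (not part of the changeset): the helper below is
hypothetical and only illustrates the calling convention the patch enables;
xrstor(), xsave_enabled() and XSTATE_LAZY are the functions/constants touched
or introduced by the hunks that follow, and the include lines are an
assumption about where those declarations live.

    #include <xen/sched.h>    /* struct vcpu */
    #include <asm/xstate.h>   /* assumed header for xrstor()/XSTATE_LAZY */

    /* Hypothetical caller: restore only the lazily-switched components,
     * leaving the non-lazy ones (currently just LWP state) alone. */
    static void restore_lazy_state(struct vcpu *v)
    {
        if ( xsave_enabled(v) )
            xrstor(v, XSTATE_LAZY);
    }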


diff -r 4288a8b9e88c -r 631bdf00a81f xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c       Mon May 09 11:38:30 2011 +0100
+++ b/xen/arch/x86/i387.c       Mon May 09 11:38:55 2011 +0100
@@ -35,14 +35,14 @@
 /*     FPU Restore Functions   */
 /*******************************/
 /* Restore x87 extended state */
-static inline void fpu_xrstor(struct vcpu *v)
+static inline void fpu_xrstor(struct vcpu *v, uint64_t mask)
 {
     /*
      * XCR0 normally represents what guest OS set. In case of Xen itself, 
      * we set all supported feature mask before doing save/restore.
      */
     set_xcr0(v->arch.xcr0_accum);
-    xrstor(v);
+    xrstor(v, mask);
     set_xcr0(v->arch.xcr0);
 }
 
@@ -98,13 +98,13 @@
 /*      FPU Save Functions     */
 /*******************************/
 /* Save x87 extended state */
-static inline void fpu_xsave(struct vcpu *v)
+static inline void fpu_xsave(struct vcpu *v, uint64_t mask)
 {
     /* XCR0 normally represents what guest OS set. In case of Xen itself,
      * we set all accumulated feature mask before doing save/restore.
      */
     set_xcr0(v->arch.xcr0_accum);
-    xsave(v);
+    xsave(v, mask);
     set_xcr0(v->arch.xcr0);    
 }
 
@@ -174,7 +174,7 @@
         return;
 
     if ( xsave_enabled(v) )
-        fpu_xrstor(v);
+        fpu_xrstor(v, XSTATE_ALL);
     else if ( v->fpu_initialised )
     {
         if ( cpu_has_fxsr )
@@ -204,7 +204,7 @@
     clts();
 
     if ( xsave_enabled(v) )
-        fpu_xsave(v);
+        fpu_xsave(v, XSTATE_ALL);
     else if ( cpu_has_fxsr )
         fpu_fxsave(v);
     else
diff -r 4288a8b9e88c -r 631bdf00a81f xen/arch/x86/xstate.c
--- a/xen/arch/x86/xstate.c     Mon May 09 11:38:30 2011 +0100
+++ b/xen/arch/x86/xstate.c     Mon May 09 11:38:55 2011 +0100
@@ -51,32 +51,37 @@
     return this_cpu(xcr0);
 }
 
-void xsave(struct vcpu *v)
+void xsave(struct vcpu *v, uint64_t mask)
 {
     struct xsave_struct *ptr = v->arch.xsave_area;
+    uint32_t hmask = mask >> 32;
+    uint32_t lmask = mask;
 
     if ( cpu_has_xsaveopt )
         asm volatile (
             ".byte " REX_PREFIX "0x0f,0xae,0x37"
             :
-            : "a" (-1), "d" (-1), "D"(ptr)
+            : "a" (lmask), "d" (hmask), "D"(ptr)
             : "memory" );
     else
         asm volatile (
             ".byte " REX_PREFIX "0x0f,0xae,0x27"
             :
-            : "a" (-1), "d" (-1), "D"(ptr)
+            : "a" (lmask), "d" (hmask), "D"(ptr)
             : "memory" );
 }
 
-void xrstor(struct vcpu *v)
+void xrstor(struct vcpu *v, uint64_t mask)
 {
+    uint32_t hmask = mask >> 32;
+    uint32_t lmask = mask;
+
     struct xsave_struct *ptr = v->arch.xsave_area;
 
     asm volatile (
         ".byte " REX_PREFIX "0x0f,0xae,0x2f"
         :
-        : "m" (*ptr), "a" (-1), "d" (-1), "D"(ptr) );
+        : "m" (*ptr), "a" (lmask), "d" (hmask), "D"(ptr) );
 }
 
 bool_t xsave_enabled(const struct vcpu *v)
diff -r 4288a8b9e88c -r 631bdf00a81f xen/include/asm-x86/xstate.h
--- a/xen/include/asm-x86/xstate.h      Mon May 09 11:38:30 2011 +0100
+++ b/xen/include/asm-x86/xstate.h      Mon May 09 11:38:55 2011 +0100
@@ -27,6 +27,10 @@
 #define XSTATE_FP_SSE  (XSTATE_FP | XSTATE_SSE)
 #define XCNTXT_MASK    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_LWP)
 
+#define XSTATE_ALL     (~0)
+#define XSTATE_NONLAZY (XSTATE_LWP)
+#define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)
+
 #ifdef CONFIG_X86_64
 #define REX_PREFIX     "0x48, "
 #else
@@ -56,8 +60,8 @@
 /* extended state operations */
 void set_xcr0(u64 xfeatures);
 uint64_t get_xcr0(void);
-void xsave(struct vcpu *v);
-void xrstor(struct vcpu *v);
+void xsave(struct vcpu *v, uint64_t mask);
+void xrstor(struct vcpu *v, uint64_t mask);
 bool_t xsave_enabled(const struct vcpu *v);
 
 /* extended state init and cleanup functions */

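A note on the mask semantics shown in the hunks above: the xsave and xrstor
instructions take the requested component bitmap in EDX:EAX and only act on
components whose bit is also set in XCR0, which is why fpu_xsave() and
fpu_xrstor() temporarily switch XCR0 to xcr0_accum around the operation. A
short sketch of the split performed inside xsave()/xrstor() (the example mask
value is arbitrary):

    uint64_t mask  = XSTATE_FP_SSE | XSTATE_YMM;  /* e.g. x87, SSE and YMM state */
    uint32_t lmask = mask;                        /* low 32 bits, passed in EAX  */
    uint32_t hmask = mask >> 32;                  /* high 32 bits, passed in EDX */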