
[Xen-changelog] [xen-3.4-testing] x86 shadow: Fix lock-less race between resync and fast path.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1246883153 -3600
# Node ID e31633d67611a17269bdf4184ec16d8a9c52d9ef
# Parent  5018fc6b67a5b67cf05044a079a74b840d74ce1d
x86 shadow: Fix lock-less race between resync and fast path.

Signed-off-by: Gianluca Guida <gianluca.guida@xxxxxxxxxxxxx>
xen-unstable changeset:   19894:3a5d8601293c
xen-unstable date:        Mon Jul 06 11:49:56 2009 +0100
---
 xen/arch/x86/mm/shadow/multi.c |   48 ++++++++++++++++++++---------------------
 1 files changed, 24 insertions(+), 24 deletions(-)

diff -r 5018fc6b67a5 -r e31633d67611 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Mon Jul 06 13:25:26 2009 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Mon Jul 06 13:25:53 2009 +0100
@@ -2972,6 +2972,30 @@ static int sh_page_fault(struct vcpu *v,
 #if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
     if ( (regs->error_code & PFEC_reserved_bit) )
     {
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
+        /* First, need to check that this isn't an out-of-sync
+         * shadow l1e.  If it is, we fall back to the slow path, which
+         * will sync it up again. */
+        {
+            shadow_l2e_t sl2e;
+            mfn_t gl1mfn;
+            if ( (__copy_from_user(&sl2e,
+                                   (sh_linear_l2_table(v)
+                                    + shadow_l2_linear_offset(va)),
+                                   sizeof(sl2e)) != 0)
+                 || !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT)
+                 || !mfn_valid(gl1mfn = _mfn(mfn_to_page(
+                                  shadow_l2e_get_mfn(sl2e))->v.sh.back))
+                 || unlikely(mfn_is_out_of_sync(gl1mfn)) )
+            {
+                /* Hit the slow path as if there had been no 
+                 * shadow entry at all, and let it tidy up */
+                ASSERT(regs->error_code & PFEC_page_present);
+                regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present);
+                goto page_fault_slow_path;
+            }
+        }
+#endif /* SHOPT_OUT_OF_SYNC */
         /* The only reasons for reserved bits to be set in shadow entries 
          * are the two "magic" shadow_l1e entries. */
         if ( likely((__copy_from_user(&sl1e, 
@@ -2980,30 +3004,6 @@ static int sh_page_fault(struct vcpu *v,
                                       sizeof(sl1e)) == 0)
                     && sh_l1e_is_magic(sl1e)) )
         {
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
-             /* First, need to check that this isn't an out-of-sync
-              * shadow l1e.  If it is, we fall back to the slow path, which
-              * will sync it up again. */
-            {
-                shadow_l2e_t sl2e;
-                mfn_t gl1mfn;
-               if ( (__copy_from_user(&sl2e,
-                                       (sh_linear_l2_table(v)
-                                        + shadow_l2_linear_offset(va)),
-                                       sizeof(sl2e)) != 0)
-                     || !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT)
-                     || !mfn_valid(gl1mfn = _mfn(mfn_to_page(
-                                      shadow_l2e_get_mfn(sl2e))->v.sh.back))
-                     || unlikely(mfn_is_out_of_sync(gl1mfn)) )
-               {
-                   /* Hit the slow path as if there had been no 
-                    * shadow entry at all, and let it tidy up */
-                   ASSERT(regs->error_code & PFEC_page_present);
-                   regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present);
-                   goto page_fault_slow_path;
-               }
-            }
-#endif /* SHOPT_OUT_OF_SYNC */
 
             if ( sh_l1e_is_gnp(sl1e) )
             {

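For context on the race being closed here: the reserved-bit fast path runs
without the shadow lock, while out-of-sync (OOS) resync on another vcpu may
be rewriting the same shadow l1. Before this patch the OOS check sat inside
the magic-sl1e branch, i.e. after the lock-less read of sl1e, so a
concurrent resync could change the l1 in that window and the fast path
would act on a stale entry. The patch hoists the check above the read: if
the backing gl1mfn is out of sync, the handler bails to the slow path,
which will resync the l1 properly. Note the error-code fixup on the way
out: both PFEC_reserved_bit and PFEC_page_present are known to be set at
that point (the branch condition and the ASSERT), so XORing with their
union clears both, and the fault is presented to the slow path as an
ordinary not-present fault, "as if there had been no shadow entry at all".

The fragment below is NOT Xen code, just a minimal compilable sketch of the
ordering change; the names (oos, sl1e_raw, handle_magic_entry) are
hypothetical stand-ins for the real shadow helpers, and it illustrates the
reordering only, not a full correctness argument:

    /* Hypothetical sketch of the patched fast-path ordering. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static atomic_bool oos;            /* set while the l1 is out of sync */
    static _Atomic uint64_t sl1e_raw;  /* rewritten by the resync path    */

    static bool handle_magic_entry(uint64_t e)
    {
        /* Stand-in for the sh_l1e_is_magic()/gnp/mmio handling. */
        return (e & 1) != 0;
    }

    static bool fast_path_patched(void)
    {
        /* Patched ordering: validate the sync state first ... */
        if ( atomic_load(&oos) )
            return false;              /* fall back to the slow path */

        /* ... and only then act on the lock-less read.  The unpatched
         * code read the entry (and tested it for magic) before looking
         * at the sync state, leaving a window for resync to race. */
        return handle_magic_entry(atomic_load(&sl1e_raw));
    }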
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

