[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] x86 shadow: reset up-pointers on all l3s when l3s stop being pinnable.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1286028350 -3600
# Node ID a4016a2576726fe551cc30589f0bb905363a6944
# Parent  4beee577912215c734b79cb84bfe3fb20c1afbfc
x86 shadow: reset up-pointers on all l3s when l3s stop being pinnable.

Walking the pinned-shadows list isn't enough: there could be an
unpinned (but still shadowed) l3 somewhere and if we later try to
unshadow it, it'll have an up-pointer of PAGE_LIST_NULL:PAGE_LIST_NULL.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c  |   41 +++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/mm/shadow/multi.c   |    4 ---
 xen/arch/x86/mm/shadow/private.h |    6 +++++
 3 files changed, 48 insertions(+), 3 deletions(-)

diff -r 4beee5779122 -r a4016a257672 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Sat Oct 02 15:04:21 2010 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Sat Oct 02 15:05:50 2010 +0100
@@ -2776,6 +2776,47 @@ sh_remove_all_shadows_and_parents(struct
 
 /**************************************************************************/
 
+/* Reset the up-pointers of every L3 shadow to 0. 
+ * This is called when l3 shadows stop being pinnable, to clear out all
+ * the list-head bits so the up-pointer field is properly initialised. */
+static int sh_clear_up_pointer(struct vcpu *v, mfn_t smfn, mfn_t unused)
+{
+    mfn_to_page(smfn)->up = 0;
+    return 0;
+}
+
+void sh_reset_l3_up_pointers(struct vcpu *v)
+{
+    static hash_callback_t callbacks[SH_type_unused] = {
+        NULL, /* none    */
+        NULL, /* l1_32   */
+        NULL, /* fl1_32  */
+        NULL, /* l2_32   */
+        NULL, /* l1_pae  */
+        NULL, /* fl1_pae */
+        NULL, /* l2_pae  */
+        NULL, /* l2h_pae */
+        NULL, /* l1_64   */
+        NULL, /* fl1_64  */
+        NULL, /* l2_64   */
+        NULL, /* l2h_64  */
+#if CONFIG_PAGING_LEVELS >= 4
+        sh_clear_up_pointer, /* l3_64   */
+#else
+        NULL, /* l3_64   */
+#endif
+        NULL, /* l4_64   */
+        NULL, /* p2m     */
+        NULL  /* unused  */
+    };
+    static unsigned int callback_mask = 1 << SH_type_l3_64_shadow;    
+
+    hash_foreach(v, callback_mask, callbacks, _mfn(INVALID_MFN));
+}
+
+
+/**************************************************************************/
+
 static void sh_update_paging_modes(struct vcpu *v)
 {
     struct domain *d = v->domain;
diff -r 4beee5779122 -r a4016a257672 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Sat Oct 02 15:04:21 2010 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Sat Oct 02 15:05:50 2010 +0100
@@ -1634,12 +1634,10 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
             page_list_for_each_safe(sp, t, 
&v->domain->arch.paging.shadow.pinned_shadows)
             {
                 if ( sp->u.sh.type == SH_type_l3_64_shadow )
-                {
                     sh_unpin(v, page_to_mfn(sp));
-                    sp->up = 0;
-                }
             }
             v->domain->arch.paging.shadow.opt_flags &= 
~SHOPT_LINUX_L3_TOPLEVEL;
+            sh_reset_l3_up_pointers(v);
         }
     }
 #endif
diff -r 4beee5779122 -r a4016a257672 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Sat Oct 02 15:04:21 2010 +0100
+++ b/xen/arch/x86/mm/shadow/private.h  Sat Oct 02 15:05:50 2010 +0100
@@ -475,6 +475,12 @@ mfn_t oos_snapshot_lookup(struct vcpu *v
 
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */
 
+
+/* Reset the up-pointers of every L3 shadow to 0. 
+ * This is called when l3 shadows stop being pinnable, to clear out all
+ * the list-head bits so the up-pointer field is properly initialised. */
+void sh_reset_l3_up_pointers(struct vcpu *v);
+
 /******************************************************************************
  * Flags used in the return value of the shadow_set_lXe() functions...
  */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.