[Xen-changelog] Bug fix for when an attempt to grab a ref to a guest page fails.



ChangeSet 1.1236.32.5, 2005/03/15 14:26:41+00:00, mafetter@xxxxxxxxxxxxxxxx

        Bug fix for when an attempt to grab a ref to a guest page fails.
        In general, the code is much more paranoid now about checking
        the return status of shadow_get_page_from_l1e() and get_shadow_ref().
        
        Signed-off-by: michael.fetterman@xxxxxxxxxxxx



 arch/x86/shadow.c        |   23 +++++++++------
 include/asm-x86/shadow.h |   71 +++++++++++++++++++++++++----------------------
 2 files changed, 54 insertions(+), 40 deletions(-)
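
The pattern applied throughout is roughly the following (a minimal sketch
in C; the variable names and surrounding context are illustrative only,
not code lifted verbatim from the tree):

    /* Paths that can tolerate a failed ref simply drop the mapping,
     * installing a non-present shadow entry instead of an unref'd one. */
    if ( (new_spte & _PAGE_PRESENT) &&
         !shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d) )
        new_spte = 0;

    /* Paths where a failed ref can only mean the shadow refcounts are
     * already inconsistent give up loudly rather than limp on. */
    if ( !get_shadow_ref(sl1mfn) )
        BUG();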


diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     2005-04-05 12:08:44 -04:00
+++ b/xen/arch/x86/shadow.c     2005-04-05 12:08:44 -04:00
@@ -1178,7 +1178,8 @@
     ASSERT( !(old_sl2e & _PAGE_PRESENT) );
 #endif
 
-    get_shadow_ref(sl1mfn);
+    if (!get_shadow_ref(sl1mfn))
+        BUG();
     l2pde_general(d, &gl2e, &sl2e, sl1mfn);
     __guest_set_l2e(ed, va, gl2e);
     __shadow_set_l2e(ed, va, sl2e);
@@ -1195,9 +1196,13 @@
 
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
-            l1pte_propagate_from_guest(d, gpl1e[i], &spl1e[i]);
-            if ( spl1e[i] & _PAGE_PRESENT )
-                shadow_get_page_from_l1e(mk_l1_pgentry(spl1e[i]), d);
+            unsigned long sl1e;
+
+            l1pte_propagate_from_guest(d, gpl1e[i], &sl1e);
+            if ( (sl1e & _PAGE_PRESENT) &&
+                 !shadow_get_page_from_l1e(mk_l1_pgentry(sl1e), d) )
+                sl1e = 0;
+            spl1e[i] = sl1e;
         }
     }
 }
@@ -1502,8 +1507,9 @@
             unsigned long old = pt[i];
             unsigned long new = old & ~_PAGE_RW;
 
-            if ( is_l1_shadow )
-                shadow_get_page_from_l1e(mk_l1_pgentry(new), d);
+            if ( is_l1_shadow &&
+                 !shadow_get_page_from_l1e(mk_l1_pgentry(new), d) )
+                BUG();
 
             count++;
             pt[i] = new;
@@ -1724,8 +1730,9 @@
         unsigned long opte = *ppte;
         unsigned long npte = opte & ~_PAGE_RW;
 
-        if ( npte & _PAGE_PRESENT)
-            shadow_get_page_from_l1e(mk_l1_pgentry(npte), d);
+        if ( (npte & _PAGE_PRESENT) &&
+             !shadow_get_page_from_l1e(mk_l1_pgentry(npte), d) )
+            BUG();
         *ppte = npte;
         put_page_from_l1e(mk_l1_pgentry(opte), d);
 
diff -Nru a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      2005-04-05 12:08:44 -04:00
+++ b/xen/include/asm-x86/shadow.h      2005-04-05 12:08:44 -04:00
@@ -259,13 +259,16 @@
          (d != owner) )
     {
         res = get_page_from_l1e(l1e, owner);
-        printk("tried to map page from domain %d into shadow page tables "
+        printk("tried to map mfn %p from domain %d into shadow page tables "
                "of domain %d; %s\n",
-               owner->id, d->id, res ? "success" : "failed");
+               mfn, owner->id, d->id, res ? "success" : "failed");
     }
 
     if ( unlikely(!res) )
+    {
         perfc_incrc(shadow_get_page_fail);
+        FSH_LOG("%s failed to get ref l1e=%p\n", l1_pgentry_val(l1e));
+    }
 
     return res;
 }
@@ -313,8 +316,9 @@
         //
         if ( (old_hl2e ^ new_hl2e) & (PAGE_MASK | _PAGE_PRESENT) )
         {
-            if ( new_hl2e & _PAGE_PRESENT )
-                shadow_get_page_from_l1e(mk_l1_pgentry(new_hl2e), ed->domain);
+            if ( (new_hl2e & _PAGE_PRESENT) &&
+                 !shadow_get_page_from_l1e(mk_l1_pgentry(new_hl2e), ed->domain) )
+                new_hl2e = 0;
             if ( old_hl2e & _PAGE_PRESENT )
                 put_page_from_l1e(mk_l1_pgentry(old_hl2e), ed->domain);
         }
@@ -531,26 +535,20 @@
 static inline void l1pte_propagate_from_guest(
     struct domain *d, unsigned long gpte, unsigned long *spte_p)
 { 
-    unsigned long spte = *spte_p;
     unsigned long pfn = gpte >> PAGE_SHIFT;
     unsigned long mfn = __gpfn_to_mfn(d, pfn);
+    unsigned long spte;
 
 #if SHADOW_VERBOSE_DEBUG
-    unsigned long old_spte = spte;
+    unsigned long old_spte = *spte_p;
 #endif
 
-    if ( unlikely(!mfn) )
-    {
-        // likely an MMIO address space mapping...
-        //
-        *spte_p = 0;
-        return;
-    }
-
     spte = 0;
-    if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == 
-         (_PAGE_PRESENT|_PAGE_ACCESSED) ) {
-        
+
+    if ( mfn &&
+         ((gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
+          (_PAGE_PRESENT|_PAGE_ACCESSED)) ) {
+
         spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
         
         if ( shadow_mode_log_dirty(d) ||
@@ -563,7 +561,7 @@
 
 #if SHADOW_VERBOSE_DEBUG
     if ( old_spte || spte || gpte )
-        debugtrace_printk("l1pte_propagate_from_guest: gpte=0x%p, old spte=0x%p, new spte=0x%p\n", gpte, old_spte, spte);
+        SH_VLOG("l1pte_propagate_from_guest: gpte=0x%p, old spte=0x%p, new spte=0x%p", gpte, old_spte, spte);
 #endif
 
     *spte_p = spte;
@@ -624,8 +622,7 @@
 #endif
 
     old_spte = *shadow_pte_p;
-    l1pte_propagate_from_guest(d, new_pte, shadow_pte_p);
-    new_spte = *shadow_pte_p;
+    l1pte_propagate_from_guest(d, new_pte, &new_spte);
 
     // only do the ref counting if something important changed.
     //
@@ -634,12 +631,15 @@
     {
         perfc_incrc(validate_pte_changes);
 
-        if ( new_spte & _PAGE_PRESENT )
-            shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d);
+        if ( (new_spte & _PAGE_PRESENT) &&
+             !shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d) )
+            new_spte = 0;
         if ( old_spte & _PAGE_PRESENT )
             put_page_from_l1e(mk_l1_pgentry(old_spte), d);
     }
 
+    *shadow_pte_p = new_spte;
+
     // paranoia rules!
     return 1;
 }
@@ -652,13 +652,14 @@
     unsigned long new_pde,
     unsigned long *shadow_pde_p)
 {
-    unsigned long old_spde = *shadow_pde_p;
-    unsigned long new_spde;
+    unsigned long old_spde, new_spde;
 
     perfc_incrc(validate_pde_calls);
 
-    l2pde_propagate_from_guest(d, &new_pde, shadow_pde_p);
-    new_spde = *shadow_pde_p;
+    old_spde = *shadow_pde_p;
+    l2pde_propagate_from_guest(d, &new_pde, &new_spde);
+
+    // XXX Aren't we supposed to propagate the new_pde to the guest?
 
     // only do the ref counting if something important changed.
     //
@@ -667,12 +668,15 @@
     {
         perfc_incrc(validate_pde_changes);
 
-        if ( new_spde & _PAGE_PRESENT )
-            get_shadow_ref(new_spde >> PAGE_SHIFT);
+        if ( (new_spde & _PAGE_PRESENT) &&
+             !get_shadow_ref(new_spde >> PAGE_SHIFT) )
+            BUG();
         if ( old_spde & _PAGE_PRESENT )
             put_shadow_ref(old_spde >> PAGE_SHIFT);
     }
 
+    *shadow_pde_p = new_spde;
+
     // paranoia rules!
     return 1;
 }
@@ -1140,7 +1144,8 @@
             if ( sl1mfn )
             {
                 perfc_incrc(shadow_set_l1e_unlinked);
-                get_shadow_ref(sl1mfn);
+                if (!get_shadow_ref(sl1mfn))
+                    BUG();
                 l2pde_general(d, &gpde, &sl2e, sl1mfn);
                 __guest_set_l2e(ed, va, gpde);
                 __shadow_set_l2e(ed, va, sl2e);
@@ -1155,17 +1160,19 @@
     }
 
     old_spte = l1_pgentry_val(shadow_linear_pg_table[l1_linear_offset(va)]);
-    shadow_linear_pg_table[l1_linear_offset(va)] = mk_l1_pgentry(new_spte);
 
     // only do the ref counting if something important changed.
     //
     if ( (old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT) )
     {
-        if ( new_spte & _PAGE_PRESENT )
-            shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d);
+        if ( (new_spte & _PAGE_PRESENT) &&
+             !shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d) )
+            new_spte = 0;
         if ( old_spte & _PAGE_PRESENT )
             put_page_from_l1e(mk_l1_pgentry(old_spte), d);
     }
+
+    shadow_linear_pg_table[l1_linear_offset(va)] = mk_l1_pgentry(new_spte);
 }
 
 /************************************************************************/
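
One related detail, shown here as a rough C sketch of the new ordering in
the validate/set paths above (again illustrative, not an exact copy): the
store into the shadow table is now performed only after the ref counting
has either succeeded or the entry has been zeroed on failure, so a shadow
entry visible to the hardware always holds a reference on the page it maps.

    old_spte = *shadow_pte_p;
    l1pte_propagate_from_guest(d, new_pte, &new_spte);

    if ( (old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT) )
    {
        if ( (new_spte & _PAGE_PRESENT) &&
             !shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d) )
            new_spte = 0;                  /* never publish an unref'd entry */
        if ( old_spte & _PAGE_PRESENT )
            put_page_from_l1e(mk_l1_pgentry(old_spte), d);
    }

    *shadow_pte_p = new_spte;              /* commit after refs are settled */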
