
[Xen-changelog] [xen master] p2m: Always check to see if removing a p2m entry actually worked



commit 92790672dedf2eab042e04ecc277c19d40fd348a
Author:     George Dunlap <george.dunlap@xxxxxxxxxx>
AuthorDate: Tue Nov 28 13:13:03 2017 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Nov 28 13:13:03 2017 +0100

    p2m: Always check to see if removing a p2m entry actually worked
    
    The PoD zero-check functions speculatively remove memory from the p2m,
    then check to see if it's completely zeroed, before putting it in the
    cache.
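
    As a rough sketch of that flow (hedged: page_is_zeroed() is a
    hypothetical stand-in for the inline scan, and the locals are
    assumed from context; p2m_set_entry(), p2m_tlb_flush_sync() and
    p2m_pod_cache_add() are the real helpers):

        /* Speculatively drop the mapping; rc was previously ignored. */
        rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order,
                           p2m_populate_on_demand, p2m->default_access);
        p2m_tlb_flush_sync(p2m);

        if ( page_is_zeroed(mfn) )               /* hypothetical helper */
            p2m_pod_cache_add(p2m, mfn_to_page(mfn), order);
        else
            p2m_set_entry(p2m, gfn, mfn, order,  /* restore old mapping */
                          type, p2m->default_access);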
    
    Unfortunately, the p2m_set_entry() calls may fail if the underlying
    pagetable structure needs to change and the domain has exhausted its
    p2m memory pool: for instance, if we're removing a 2MiB region out of
    a 1GiB entry (in the p2m_pod_zero_check_superpage() case), or a 4k
    region out of a 2MiB or larger entry (in the p2m_pod_zero_check()
    case).  The return value of these calls is never checked.
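
    To see why, a condensed sketch of the relevant path inside the p2m
    code (simplified: existing_entry_order is invented here for clarity
    and the real shattering logic is more involved; p2m_alloc_ptp() is
    the allocator backed by the per-domain p2m pool):

        if ( order < existing_entry_order )     /* large entry must split */
        {
            pg = p2m_alloc_ptp(p2m, type);      /* new intermediate table */
            if ( pg == NULL )
                return -ENOMEM;                 /* pool exhausted -> fail */
        }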
    
    The underlying mfn will then be added into the PoD cache, and at some
    point mapped into another location in the p2m.  If the guest
    afterwards balloons out this memory, it will be freed to the
    hypervisor and potentially reused by another domain, even though the
    original domain still has writable mappings to it.
    
    There are several places where p2m_set_entry() shouldn't be able to
    fail, as it is guaranteed to write an entry of the same order that
    succeeded before.  Add a backstop of crashing the domain just in case,
    and an ASSERT_UNREACHABLE() to flag up the broken assumption on debug
    builds.
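
    Condensed from the hunks below, the resulting pattern at each
    restore site is:

        if ( p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M,
                           type0, p2m->default_access) )
        {
            ASSERT_UNREACHABLE();  /* the same order just succeeded */
            domain_crash(d);       /* backstop against leaking the page */
        }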
    
    While we're here, use PAGE_ORDER_2M rather than a magic constant.
    
    This is part of XSA-247.
    
    Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/mm/p2m-pod.c | 65 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 54 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 7ba56b1..cc8e3fb 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -771,8 +771,10 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn)
     }
 
     /* Try to remove the page, restoring old mapping if it fails. */
-    p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
-                  p2m_populate_on_demand, p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
+                       p2m_populate_on_demand, p2m->default_access) )
+        goto out;
+
     p2m_tlb_flush_sync(p2m);
 
     /*
@@ -833,8 +835,17 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn)
     ret = SUPERPAGE_PAGES;
 
 out_reset:
-    if ( reset )
-        p2m_set_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
+    /*
+     * This p2m_set_entry() call shouldn't be able to fail, since the same order
+     * on the same gfn succeeded above.  If that turns out to be false, crashing
+     * the domain should be the safest way of making sure we don't leak memory.
+     */
+    if ( reset && p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M,
+                                type0, p2m->default_access) )
+    {
+        ASSERT_UNREACHABLE();
+        domain_crash(d);
+    }
 
 out:
     gfn_unlock(p2m, gfn, SUPERPAGE_ORDER);
@@ -900,8 +911,9 @@ p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, int count)
         }
 
         /* Try to remove the page, restoring old mapping if it fails. */
-        p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
-                      p2m_populate_on_demand, p2m->default_access);
+        if ( p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
+                           p2m_populate_on_demand, p2m->default_access) )
+            goto skip;
 
         /*
          * See if the page was successfully unmapped.  (Allow one refcount
@@ -909,12 +921,22 @@ p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, int count)
          */
         if ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) > 1 )
         {
+            /*
+             * If the previous p2m_set_entry call succeeded, this one shouldn't
+             * be able to fail.  If it does, crashing the domain should be safe.
+             */
+            if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+                               types[i], p2m->default_access) )
+            {
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+                goto out_unmap;
+            }
+
+        skip:
             unmap_domain_page(map[i]);
             map[i] = NULL;
 
-            p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-                types[i], p2m->default_access);
-
             continue;
         }
     }
@@ -933,14 +955,25 @@ p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, int count)
 
         unmap_domain_page(map[i]);
 
+        map[i] = NULL;
+
         /*
          * See comment in p2m_pod_zero_check_superpage() re gnttab
          * check timing.
          */
         if ( j < (PAGE_SIZE / sizeof(*map[i])) )
         {
-            p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-                          types[i], p2m->default_access);
+            /*
+             * If the previous p2m_set_entry call succeeded, this one shouldn't
+             * be able to fail.  If it does, crashing the domain should be safe.
+             */
+            if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+                               types[i], p2m->default_access) )
+            {
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+                goto out_unmap;
+            }
         }
         else
         {
@@ -965,6 +998,16 @@ p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, int count)
         }
     }
 
+    return;
+
+out_unmap:
+    /*
+     * Something went wrong, probably crashing the domain.  Unmap
+     * everything and return.
+     */
+    for ( i = 0; i < count; i++ )
+        if ( map[i] )
+            unmap_domain_page(map[i]);
 }
 
 #define POD_SWEEP_LIMIT 1024
--
generated by git-patchbot for /home/xen/git/xen.git#master
