[xen master] x86/P2M: deal with partial success of p2m_set_entry()



commit 74a11c43fd7e074b1f77631b446dd2115eacb9e8
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Nov 22 11:12:32 2021 +0000
Commit:     Ian Jackson <iwj@xxxxxxxxxxxxxx>
CommitDate: Mon Nov 22 12:27:30 2021 +0000

    x86/P2M: deal with partial success of p2m_set_entry()
    
    M2P and PoD stats need to remain in sync with P2M; if an update succeeds
    only partially, respective adjustments need to be made. If updates get
    made before the call, they may also need undoing upon complete failure
    (i.e. including the single-page case).
    
    Log-dirty state should also be kept in sync.
    
    Note that the change to set_typed_p2m_entry() may not be strictly
    necessary (due to the order restriction enforced near the top of the
    function), but is being kept here to be on the safe side.
    
    This is CVE-2021-28705 and CVE-2021-28709 / XSA-389.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/mm/p2m.c | 116 ++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 102 insertions(+), 14 deletions(-)
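
As background for the diff below, here is a minimal, self-contained C sketch
(not Xen code) of the recovery pattern the commit message describes: when a
multi-page update fails partway through, re-read each slot to learn which
pages actually changed, then apply or undo the per-page side effects
accordingly. All identifiers in it (set_range(), the toy p2m[] and m2p[]
arrays) are hypothetical stand-ins for p2m_set_entry() and p2m->get_entry(),
not real Xen interfaces.

/*
 * Toy model, not Xen code: p2m[] maps gfn -> mfn, m2p[] maps mfn -> gfn,
 * and the two must stay in sync.  set_range() can fail partway through,
 * just as p2m_set_entry() can for a superpage-sized request.
 */
#include <stdio.h>

#define NPAGES  8
#define INVALID (~0UL)

static unsigned long p2m[NPAGES];
static unsigned long m2p[NPAGES];

/* Map count pages gfn -> mfn, but fail before slot fail_at is written. */
static int set_range(unsigned long gfn, unsigned long mfn,
                     unsigned int count, unsigned int fail_at)
{
    for ( unsigned int i = 0; i < count; ++i )
    {
        if ( i == fail_at )
            return -1;                  /* slots [0, i) were still changed */
        p2m[gfn + i] = mfn + i;
    }
    return 0;
}

int main(void)
{
    const unsigned long gfn = 0, mfn = 0;
    const unsigned int count = NPAGES;

    for ( unsigned int i = 0; i < NPAGES; ++i )
        p2m[i] = m2p[i] = INVALID;

    if ( set_range(gfn, mfn, count, 5) == 0 )
    {
        /* Full success: one pass brings M2P in sync for the whole range. */
        for ( unsigned int i = 0; i < count; ++i )
            m2p[mfn + i] = gfn + i;
    }
    else
    {
        /*
         * Partial success: re-read each slot and update M2P only where the
         * new mapping actually took hold, mirroring the patch's recovery
         * loops that call p2m->get_entry() once per page.
         */
        for ( unsigned int i = 0; i < count; ++i )
            if ( p2m[gfn + i] == mfn + i )
                m2p[mfn + i] = gfn + i;
    }

    for ( unsigned int i = 0; i < NPAGES; ++i )
        printf("mfn %u -> gfn %ld\n", i, (long)m2p[i]);

    return 0;
}

The actual hunks below follow the same shape, with extra per-page decisions:
depending on the type returned by p2m->get_entry(), they fix up the M2P
table, the log-dirty bitmap, or the PoD entry_count.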

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 2bd4d37286..def1695cf0 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -794,6 +794,7 @@ p2m_remove_page(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
     unsigned long i;
     p2m_type_t t;
     p2m_access_t a;
+    int rc;
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
     P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_x(gfn), mfn_x(mfn));
@@ -825,8 +826,27 @@ p2m_remove_page(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
 
     ioreq_request_mapcache_invalidate(p2m->domain);
 
-    return p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid,
-                         p2m->default_access);
+    rc = p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid,
+                       p2m->default_access);
+    if ( likely(!rc) || !mfn_valid(mfn) )
+        return rc;
+
+    /*
+     * The operation may have partially succeeded. For the failed part we need
+     * to undo the M2P update and, out of precaution, mark the pages dirty
+     * again.
+     */
+    for ( i = 0; i < (1UL << page_order); ++i )
+    {
+        p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, NULL, NULL);
+        if ( !p2m_is_hole(t) && !p2m_is_special(t) && !p2m_is_shared(t) )
+        {
+            set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_x(gfn) + i);
+            paging_mark_pfn_dirty(p2m->domain, _pfn(gfn_x(gfn) + i));
+        }
+    }
+
+    return rc;
 }
 
 int
@@ -1022,13 +1042,8 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
 
     /* Now, actually do the two-way mapping */
     rc = p2m_set_entry(p2m, gfn, mfn, page_order, t, p2m->default_access);
-    if ( rc == 0 )
+    if ( likely(!rc) )
     {
-        pod_lock(p2m);
-        p2m->pod.entry_count -= pod_count;
-        BUG_ON(p2m->pod.entry_count < 0);
-        pod_unlock(p2m);
-
         if ( !p2m_is_grant(t) )
         {
             for ( i = 0; i < (1UL << page_order); i++ )
@@ -1036,6 +1051,42 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
                                   gfn_x(gfn_add(gfn, i)));
         }
     }
+    else
+    {
+        /*
+         * The operation may have partially succeeded. For the successful part
+         * we need to update M2P and dirty state, while for the failed part we
+         * may need to adjust PoD stats as well as undo the earlier M2P update.
+         */
+        for ( i = 0; i < (1UL << page_order); ++i )
+        {
+            omfn = p2m->get_entry(p2m, gfn_add(gfn, i), &ot, &a, 0, NULL, NULL);
+            if ( p2m_is_pod(ot) )
+            {
+                BUG_ON(!pod_count);
+                --pod_count;
+            }
+            else if ( mfn_eq(omfn, mfn_add(mfn, i)) && ot == t &&
+                      a == p2m->default_access && !p2m_is_grant(t) )
+            {
+                set_gpfn_from_mfn(mfn_x(omfn), gfn_x(gfn) + i);
+                paging_mark_pfn_dirty(d, _pfn(gfn_x(gfn) + i));
+            }
+            else if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
+            {
+                ASSERT(mfn_valid(omfn));
+                set_gpfn_from_mfn(mfn_x(omfn), gfn_x(gfn) + i);
+            }
+        }
+    }
+
+    if ( pod_count )
+    {
+        pod_lock(p2m);
+        p2m->pod.entry_count -= pod_count;
+        BUG_ON(p2m->pod.entry_count < 0);
+        pod_unlock(p2m);
+    }
 
 out:
     p2m_unlock(p2m);
@@ -1325,6 +1376,49 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l,
             return 0;
         }
     }
+
+    P2M_DEBUG("set %d %lx %lx\n", gfn_p2mt, gfn_l, mfn_x(mfn));
+    rc = p2m_set_entry(p2m, gfn, mfn, order, gfn_p2mt, access);
+    if ( unlikely(rc) )
+    {
+        gdprintk(XENLOG_ERR, "p2m_set_entry: %#lx:%u -> %d (0x%"PRI_mfn")\n",
+                 gfn_l, order, rc, mfn_x(mfn));
+
+        /*
+         * The operation may have partially succeeded. For the successful part
+         * we need to update PoD stats, M2P, and dirty state.
+         */
+        if ( order != PAGE_ORDER_4K )
+        {
+            unsigned long i;
+
+            for ( i = 0; i < (1UL << order); ++i )
+            {
+                p2m_type_t t;
+                mfn_t cmfn = p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0,
+                                            NULL, NULL);
+
+                if ( !mfn_eq(cmfn, mfn_add(mfn, i)) || t != gfn_p2mt ||
+                     a != access )
+                    continue;
+
+                if ( p2m_is_ram(ot) )
+                {
+                    ASSERT(mfn_valid(mfn_add(omfn, i)));
+                    set_gpfn_from_mfn(mfn_x(omfn) + i, INVALID_M2P_ENTRY);
+
+                    ioreq_request_mapcache_invalidate(d);
+                }
+                else if ( p2m_is_pod(ot) )
+                {
+                    pod_lock(p2m);
+                    BUG_ON(!p2m->pod.entry_count);
+                    --p2m->pod.entry_count;
+                    pod_unlock(p2m);
+                }
+            }
+        }
+    }
     else if ( p2m_is_ram(ot) )
     {
         unsigned long i;
@@ -1337,12 +1431,6 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l,
 
         ioreq_request_mapcache_invalidate(d);
     }
-
-    P2M_DEBUG("set %d %lx %lx\n", gfn_p2mt, gfn_l, mfn_x(mfn));
-    rc = p2m_set_entry(p2m, gfn, mfn, order, gfn_p2mt, access);
-    if ( rc )
-        gdprintk(XENLOG_ERR, "p2m_set_entry: %#lx:%u -> %d (0x%"PRI_mfn")\n",
-                 gfn_l, order, rc, mfn_x(mfn));
     else if ( p2m_is_pod(ot) )
     {
         pod_lock(p2m);
--
generated by git-patchbot for /home/xen/git/xen.git#master