
[xen staging-4.12] x86/p2m: guard (in particular) identity mapping entries



commit 4e5bf7ebbfd0bb4fc57466f8eebfb90047a3e277
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Aug 25 15:49:58 2021 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Aug 25 15:49:58 2021 +0200

    x86/p2m: guard (in particular) identity mapping entries
    
    Such entries, created by set_identity_p2m_entry(), should only be
    destroyed by clear_identity_p2m_entry(). However, entries created by
    set_mmio_p2m_entry() should similarly only be torn down by
    clear_mmio_p2m_entry(), so the logic is based on p2m_mmio_direct as
    the entry type (separating "ordinary" from 1:1 mappings would
    require a further indicator to tell the two apart).
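    
    As an illustrative, stand-alone sketch (not part of the patch): the
    invariant being enforced is that direct-MMIO entries may only be
    torn down by their dedicated accessors, while generic removal paths
    refuse them. Names mirror Xen's, but this is simplified model code,
    not Xen code:
    
        #include <errno.h>
        #include <stdio.h>
    
        typedef enum {
            p2m_ram_rw,        /* ordinary RAM: generic paths may remove it */
            p2m_mmio_direct,   /* set_mmio_p2m_entry() / set_identity_p2m_entry() */
        } p2m_type_t;
    
        /* Generic teardown now refuses direct-MMIO entries outright. */
        static int generic_remove(p2m_type_t t)
        {
            return t == p2m_mmio_direct ? -EPERM : 0;
        }
    
        int main(void)
        {
            printf("%d %d\n", generic_remove(p2m_ram_rw),
                   generic_remove(p2m_mmio_direct));   /* 0, -EPERM */
            return 0;
        }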
    
    As to the guest_remove_page() change: commit 48dfb297a20a ("x86/PVH:
    allow guest_remove_page to remove p2m_mmio_direct pages"), which
    introduced the call to clear_mmio_p2m_entry(), claimed this was done
    for hwdom only, but that was not actually the case. In any event the
    code shouldn't be there in the first place, as MMIO entries shouldn't
    be dropped this way. To avoid re-triggering the warning that
    48dfb297a20a silenced, xenmem_add_to_physmap_one() is adjusted
    instead.
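    
    A minimal sketch of the adjusted replacement logic (again not from
    the patch; mfn_valid() and the types are simplified stand-ins for
    Xen's real helpers): a pre-existing direct-MMIO mapping at the
    target GFN now yields -EPERM instead of being dropped:
    
        #include <errno.h>
        #include <stdbool.h>
    
        typedef enum { p2m_invalid, p2m_ram_rw, p2m_mmio_direct } p2m_type_t;
    
        static bool mfn_valid(unsigned long mfn) { return mfn != ~0UL; }
    
        static int replace_slot(p2m_type_t prev_t, unsigned long prev_mfn)
        {
            if ( prev_t == p2m_mmio_direct )
                return -EPERM;          /* never drop direct-MMIO this way */
            if ( mfn_valid(prev_mfn) )
            {
                /* unhook or free the previously mapped page, as before */
            }
            return 0;                   /* proceed with the new mapping */
        }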
    
    Note that guest_physmap_mark_populate_on_demand() gets tightened beyond
    the immediate purpose of this change.
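    
    Concretely, the check is inverted: instead of refusing only RAM
    types, everything but PoD, invalid, and emulated-MMIO entries is now
    refused. A stand-alone model (not Xen code) of the new acceptance
    test:
    
        #include <errno.h>
    
        typedef enum {
            p2m_invalid, p2m_ram_rw, p2m_mmio_dm,
            p2m_mmio_direct, p2m_populate_on_demand,
        } p2m_type_t;
    
        /* Accept only the types that may legitimately become PoD;
         * anything else (notably p2m_mmio_direct) is -EBUSY. */
        static int may_mark_pod(p2m_type_t ot)
        {
            if ( ot == p2m_populate_on_demand )
                return 0;   /* replacing an existing PoD entry is fine */
            return (ot == p2m_invalid || ot == p2m_mmio_dm) ? 0 : -EBUSY;
        }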
    
    Note also that I didn't inspect code that isn't security-supported,
    e.g. sharing, paging, or altp2m.
    
    This is CVE-2021-28694 / part of XSA-378.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Paul Durrant <paul@xxxxxxx>
    master commit: 753cb68e653002e89fdcd1c80e52905fdbfb78cb
    master date: 2021-08-25 14:17:32 +0200
---
 xen/arch/x86/mm.c         |  4 +++-
 xen/arch/x86/mm/p2m-pod.c | 12 ++++++------
 xen/arch/x86/mm/p2m.c     | 11 ++++++-----
 xen/common/memory.c       | 11 ++++++++++-
 xen/include/asm-x86/p2m.h |  5 ++---
 5 files changed, 27 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index db4cfdf20b..09d6377384 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4809,7 +4809,9 @@ int xenmem_add_to_physmap_one(
 
     /* Remove previously mapped page if it was present. */
     prev_mfn = mfn_x(get_gfn(d, gfn_x(gpfn), &p2mt));
-    if ( mfn_valid(_mfn(prev_mfn)) )
+    if ( p2mt == p2m_mmio_direct )
+        rc = -EPERM;
+    else if ( mfn_valid(_mfn(prev_mfn)) )
     {
         if ( is_xen_heap_mfn(prev_mfn) )
             /* Xen heap frames are simply unhooked from this phys slot. */
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 4313863066..f4e5a35578 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1297,17 +1297,17 @@ guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn_l,
 
         p2m->get_entry(p2m, gfn_add(gfn, i), &ot, &a, 0, &cur_order, NULL);
         n = 1UL << min(order, cur_order);
-        if ( p2m_is_ram(ot) )
+        if ( ot == p2m_populate_on_demand )
+        {
+            /* Count how many PoD entries we'll be replacing if successful */
+            pod_count += n;
+        }
+        else if ( ot != p2m_invalid && ot != p2m_mmio_dm )
         {
             P2M_DEBUG("gfn_to_mfn returned type %d!\n", ot);
             rc = -EBUSY;
             goto out;
         }
-        else if ( ot == p2m_populate_on_demand )
-        {
-            /* Count how man PoD entries we'll be replacing if successful */
-            pod_count += n;
-        }
     }
 
     /* Now, actually do the two-way mapping */
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 9bd6a5ad0b..f3fdc79ff4 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -795,7 +795,8 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
                                           &cur_order, NULL);
 
         if ( p2m_is_valid(t) &&
-             (!mfn_valid(_mfn(mfn)) || mfn + i != mfn_x(mfn_return)) )
+             (!mfn_valid(_mfn(mfn)) || t == p2m_mmio_direct ||
+              mfn + i != mfn_x(mfn_return)) )
             return -EILSEQ;
 
         i += (1UL << cur_order) - ((gfn_l + i) & ((1UL << cur_order) - 1));
@@ -873,7 +874,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
     if ( p2m_is_foreign(t) )
         return -EINVAL;
 
-    if ( !mfn_valid(mfn) )
+    if ( !mfn_valid(mfn) || t == p2m_mmio_direct )
     {
         ASSERT_UNREACHABLE();
         return -EINVAL;
@@ -919,7 +920,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
         }
         if ( p2m_is_special(ot) )
         {
-            /* Don't permit unmapping grant/foreign this way. */
+            /* Don't permit unmapping grant/foreign/direct-MMIO this way. */
             domain_crash(d);
             p2m_unlock(p2m);
             
@@ -1375,8 +1376,8 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
  *    order+1  for caller to retry with order (guaranteed smaller than
  *             the order value passed in)
  */
-int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn_l, mfn_t mfn,
-                         unsigned int order)
+static int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn_l,
+                                mfn_t mfn, unsigned int order)
 {
     int rc = -EINVAL;
     gfn_t gfn = _gfn(gfn_l);
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 8aea327df4..ab217cc812 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -336,7 +336,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
     }
     if ( p2mt == p2m_mmio_direct )
     {
-        rc = clear_mmio_p2m_entry(d, gmfn, mfn, PAGE_ORDER_4K);
+        rc = -EPERM;
         goto out_put_gfn;
     }
 #else
@@ -1724,6 +1724,15 @@ int check_get_page_from_gfn(struct domain *d, gfn_t gfn, bool readonly,
         return -EAGAIN;
     }
 #endif
+#ifdef CONFIG_X86
+    if ( p2mt == p2m_mmio_direct )
+    {
+        if ( page )
+            put_page(page);
+
+        return -EPERM;
+    }
+#endif
 
     if ( !page )
         return -EINVAL;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 57deff6a64..344db14a10 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -143,7 +143,8 @@ typedef unsigned int p2m_query_t;
 
 /* Types established/cleaned up via special accessors. */
 #define P2M_SPECIAL_TYPES (P2M_GRANT_TYPES | \
-                           p2m_to_mask(p2m_map_foreign))
+                           p2m_to_mask(p2m_map_foreign) | \
+                           p2m_to_mask(p2m_mmio_direct))
 
 /* Valid types not necessarily associated with a (valid) MFN. */
 #define P2M_INVALID_MFN_TYPES (P2M_POD_TYPES                  \
@@ -640,8 +641,6 @@ int set_foreign_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
 /* Set mmio addresses in the p2m table (for pass-through) */
 int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
                        unsigned int order, p2m_access_t access);
-int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
-                         unsigned int order);
 
 /* Set identity addresses in the p2m table (for pass-through) */
 int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
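
For reference, the P2M_SPECIAL_TYPES hunk above can be read as extending a
bitmask classifier: p2m_mmio_direct now joins the grant and foreign types
that p2m_is_special() matches. A simplified, stand-alone rendering of that
mask logic (condensed from the real header, with most types omitted):

    typedef enum {
        p2m_ram_rw, p2m_grant_map_rw, p2m_grant_map_ro,
        p2m_map_foreign, p2m_mmio_direct,
    } p2m_type_t;

    #define p2m_to_mask(t)    (1UL << (t))
    #define P2M_GRANT_TYPES   (p2m_to_mask(p2m_grant_map_rw) | \
                               p2m_to_mask(p2m_grant_map_ro))
    #define P2M_SPECIAL_TYPES (P2M_GRANT_TYPES | \
                               p2m_to_mask(p2m_map_foreign) | \
                               p2m_to_mask(p2m_mmio_direct))

    #define p2m_is_special(t) (p2m_to_mask(t) & P2M_SPECIAL_TYPES)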
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.12