
[Xen-devel] [PATCH v3] x86/p2m: force return value checking of p2m_set_entry()



As XSAs 246 and 247 have shown, not doing so is rather dangerous.
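
For anyone unfamiliar with the annotation: __must_check maps onto the
compiler's warn_unused_result attribute (via xen/include/xen/compiler.h, if
memory serves), so a caller that simply drops the return value of
p2m_set_entry() now gets a compile-time warning, which -Werror turns into a
build failure.  A minimal stand-alone sketch of the effect, compiled with
-Wall (set_entry() below is a made-up stand-in, not the Xen function):

#define __must_check __attribute__((__warn_unused_result__))

static int __must_check set_entry(unsigned long gfn)
{
    return gfn ? 0 : -1;      /* pretend the update can fail */
}

void caller(void)
{
    set_entry(0);             /* warning: ignoring return value of 'set_entry' */

    if ( set_entry(0) )       /* fine: the result is checked */
    {
        /* handle the error */
    }
}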

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
v3: Make sure vm_event_cancel_slot() is called from the new error path in
    p2m_mem_paging_populate(). Downgrade Kevin's R-b to an ack for that
    reason.
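
For completeness, the shape of that error path: a vm_event ring slot has
already been claimed by the time the p2m update can fail, so the failure case
jumps to the existing cancel-and-return branch via the new out_cancel label
rather than duplicating the cleanup.  A self-contained sketch of the pattern
(the names below are illustrative stand-ins, not the Xen functions):

#include <stdio.h>

static int claim_slot(void)        { return 0; }    /* pretend success */
static void cancel_slot(void)      { puts("slot cancelled"); }
static int update_entry(int fail)  { return fail ? -1 : 0; }

static void populate(int update_fails, int already_in_flight)
{
    if ( claim_slot() )
        return;                     /* nothing claimed, nothing to undo */

    if ( update_entry(update_fails) < 0 )
        goto out_cancel;            /* the error path added in v3 */

    if ( already_in_flight )
    {
        /* Pre-existing early exit, which already had to drop the slot. */
    out_cancel:
        cancel_slot();
        return;
    }

    puts("request sent");
}

int main(void)
{
    populate(1, 0);    /* p2m update fails      -> slot cancelled */
    populate(0, 1);    /* gfn already in flight -> slot cancelled */
    populate(0, 0);    /* normal case           -> request sent   */
    return 0;
}

Placing the label inside the conditional block keeps a single
vm_event_cancel_slot() call site, which is what the first hunk below relies
on.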

--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1550,9 +1550,11 @@ void p2m_mem_paging_populate(struct doma
         if ( p2mt == p2m_ram_paging_out )
             req.u.mem_paging.flags |= MEM_PAGING_EVICT_FAIL;
 
-        p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
+        rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
     }
     gfn_unlock(p2m, gfn, 0);
+    if ( rc < 0 )
+        goto out_cancel;
 
     /* Pause domain if request came from guest and gfn has paging type */
     if ( p2m_is_paging(p2mt) && v->domain == d )
@@ -1564,6 +1566,7 @@ void p2m_mem_paging_populate(struct doma
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
+    out_cancel:
         vm_event_cancel_slot(d, d->vm_event_paging);
         return;
     }
@@ -1700,10 +1703,12 @@ void p2m_mem_paging_resume(struct domain
          */
         if ( mfn_valid(mfn) && (p2mt == p2m_ram_paging_in) )
         {
-            p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
-                          paging_mode_log_dirty(d) ? p2m_ram_logdirty :
-                          p2m_ram_rw, a);
-            set_gpfn_from_mfn(mfn_x(mfn), gfn_x(gfn));
+            int rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
+                                   paging_mode_log_dirty(d) ? p2m_ram_logdirty :
+                                   p2m_ram_rw, a);
+
+            if ( !rc )
+                set_gpfn_from_mfn(mfn_x(mfn), gfn_x(gfn));
         }
         gfn_unlock(p2m, gfn, 0);
     }
@@ -2463,9 +2468,9 @@ static void p2m_reset_altp2m(struct p2m_
     p2m->max_remapped_gfn = 0;
 }
 
-void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
-                                 mfn_t mfn, unsigned int page_order,
-                                 p2m_type_t p2mt, p2m_access_t p2ma)
+int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
+                                mfn_t mfn, unsigned int page_order,
+                                p2m_type_t p2mt, p2m_access_t p2ma)
 {
     struct p2m_domain *p2m;
     p2m_access_t a;
@@ -2474,9 +2479,10 @@ void p2m_altp2m_propagate_change(struct
     unsigned int i;
     unsigned int reset_count = 0;
     unsigned int last_reset_idx = ~0;
+    int ret = 0;
 
     if ( !altp2m_active(d) )
-        return;
+        return 0;
 
     altp2m_list_lock(d);
 
@@ -2515,17 +2521,25 @@ void p2m_altp2m_propagate_change(struct
                     p2m_unlock(p2m);
                 }
 
-                goto out;
+                ret = 0;
+                break;
             }
         }
         else if ( !mfn_eq(m, INVALID_MFN) )
-            p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
+        {
+            int rc = p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
+
+            /* Best effort: Don't bail on error. */
+            if ( !ret )
+                ret = rc;
+        }
 
         __put_gfn(p2m, gfn_x(gfn));
     }
 
- out:
     altp2m_list_unlock(d);
+
+    return ret;
 }
 
 /*** Audit ***/
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -904,7 +904,11 @@ out:
         ept_free_entry(p2m, &old_entry, target);
 
     if ( entry_written && p2m_is_hostp2m(p2m) )
-        p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
+    {
+        ret = p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
+        if ( !rc )
+            rc = ret;
+    }
 
     return rc;
 }
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -689,8 +689,9 @@ void p2m_free_ptp(struct p2m_domain *p2m
 
 /* Directly set a p2m entry: only for use by p2m code. Does not need
  * a call to put_gfn afterwards/ */
-int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
-                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma);
+int __must_check p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
+                               unsigned int page_order, p2m_type_t p2mt,
+                               p2m_access_t p2ma);
 
 /* Set up function pointers for PT implementation: only for use by p2m code */
 extern void p2m_pt_init(struct p2m_domain *p2m);
@@ -830,9 +831,9 @@ int p2m_change_altp2m_gfn(struct domain
                           gfn_t old_gfn, gfn_t new_gfn);
 
 /* Propagate a host p2m change to all alternate p2m's */
-void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
-                                 mfn_t mfn, unsigned int page_order,
-                                 p2m_type_t p2mt, p2m_access_t p2ma);
+int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
+                                mfn_t mfn, unsigned int page_order,
+                                p2m_type_t p2mt, p2m_access_t p2ma);
 
 /*
  * p2m type to IOMMU flags


