
Re: [Xen-devel] [PATCH] NPT: temporarily retain page table mapping in do_recalc()



On 05/05/14 11:31, Jan Beulich wrote:
Commit b3e024f3 ("x86/NPT: don't walk page tables when changing types
on a range") neglected the fact that p2m_next_level() replaces the
previous level's mapping with the new level's one, so a stale pointer
could be dereferenced whose translation may no longer be available
(timing dependent). Add a parameter to that function allowing the
caller to request that the mapping be retained (the caller then takes
care of the unmapping).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
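
For reference, the caller-side pattern this introduces in do_recalc()
is roughly the following (a condensed sketch of the hunk further down,
with illustrative comments added; not a verbatim excerpt):

    /* Descend one level; with the new 'unmap' argument clear,
     * p2m_next_level() only updates 'table' to point at the next
     * level and leaves the previous level's mapping in place, so
     * 'pent' (which points into that mapping) stays valid. */
    err = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                         level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER,
                         pgt[level - 1], 0);
    if ( err )
        goto out;

    /* ... 'pent' may still be dereferenced here, e.g. to clear the
     * recalc flag and write the entry back ... */

    /* The caller then releases the retained mapping itself, by
     * masking 'pent' down to the start of its page. */
    unmap_domain_page((void *)((unsigned long)pent & PAGE_MASK));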

Given the new API for p2m_next_level(), it is important to note that the mapping can only be safely left in place on the success path.  In the case of an error the page must be unmapped even if 'unmap' is false.

While the current behaviour is safe, all it would take is an innocent refactoring of the error paths to violate this requirement.

Is it worth leaving a comment to that effect?
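
Something along these lines above p2m_next_level() would probably
suffice (the wording is only an illustration):

    /*
     * Note: with 'unmap' clear, the previous level's mapping is only
     * retained (for the caller to unmap later) on the success path;
     * error paths must still ensure it gets unmapped exactly once.
     */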

Functionally however, Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>


--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -172,7 +172,7 @@ static void p2m_add_iommu_flags(l1_pgent
 static int
 p2m_next_level(struct p2m_domain *p2m, void **table,
                unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
-               u32 max, unsigned long type)
+               u32 max, unsigned long type, bool_t unmap)
 {
     l1_pgentry_t *l1_entry;
     l1_pgentry_t *p2m_entry;
@@ -280,7 +280,8 @@ p2m_next_level(struct p2m_domain *p2m, v
     }
 
     next = map_domain_page(l1e_get_pfn(*p2m_entry));
-    unmap_domain_page(*table);
+    if ( unmap )
+        unmap_domain_page(*table);
     *table = next;
 
     return 0;
@@ -319,7 +320,7 @@ static int p2m_pt_set_recalc_range(struc
 
         err = p2m_next_level(p2m, &table, &gfn_remainder, first_gfn,
                              i * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER,
-                             pgt[i - 1]);
+                             pgt[i - 1], 1);
         if ( err )
             goto out;
     }
@@ -386,7 +387,7 @@ static int do_recalc(struct p2m_domain *
 
         err = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                              level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER,
-                             pgt[level - 1]);
+                             pgt[level - 1], 0);
         if ( err )
             goto out;
 
@@ -416,6 +417,7 @@ static int do_recalc(struct p2m_domain *
             clear_recalc(l1, e);
             p2m->write_p2m_entry(p2m, gfn, pent, e, level + 1);
         }
+        unmap_domain_page((void *)((unsigned long)pent & PAGE_MASK));
     }
 
     pent = p2m_find_entry(table, &gfn_remainder, gfn,
@@ -519,7 +521,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
     table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
     rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                         L4_PAGETABLE_SHIFT - PAGE_SHIFT,
-                        L4_PAGETABLE_ENTRIES, PGT_l3_page_table);
+                        L4_PAGETABLE_ENTRIES, PGT_l3_page_table, 1);
     if ( rc )
         goto out;
 
@@ -565,7 +567,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
     {
         rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                             L3_PAGETABLE_SHIFT - PAGE_SHIFT,
-                            L3_PAGETABLE_ENTRIES, PGT_l2_page_table);
+                            L3_PAGETABLE_ENTRIES, PGT_l2_page_table, 1);
         if ( rc )
             goto out;
     }
@@ -574,7 +576,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
     {
         rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                             L2_PAGETABLE_SHIFT - PAGE_SHIFT,
-                            L2_PAGETABLE_ENTRIES, PGT_l1_page_table);
+                            L2_PAGETABLE_ENTRIES, PGT_l1_page_table, 1);
         if ( rc )
             goto out;
 





_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
