[xen master] x86/p2m: add option to skip root pagetable removal in p2m_teardown()



commit 1df52a270225527ae27bfa2fc40347bf93b78357
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Tue Oct 11 14:21:23 2022 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Oct 11 14:21:23 2022 +0200

    x86/p2m: add option to skip root pagetable removal in p2m_teardown()
    
    Add a new parameter to p2m_teardown() to select whether the root
    page table should also be freed.  Note that all existing callers
    are adjusted to pass the parameter requesting removal of the root
    page tables, so behavior is not modified.
    
    No functional change intended.
    
    This is part of CVE-2022-33746 / XSA-410.
    
    Suggested-by: Julien Grall <julien@xxxxxxx>
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
---
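A minimal usage sketch (the caller below is hypothetical and not part of
this patch): with remove_root == false the root page table is merely
cleared and re-queued on p2m->pages, so a later call passing true can
free it together with the rest of the pool.

    /* Illustrative only; follow-up changes would introduce real callers. */
    static void example_partial_p2m_teardown(struct domain *d)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d);

        /*
         * Free the interior p2m pages but keep the (cleared) root page
         * table allocated and queued on p2m->pages.
         */
        p2m_teardown(p2m, false);

        /* A later, final teardown can then release the root as well. */
        p2m_teardown(p2m, true);
    }
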
 xen/arch/x86/include/asm/p2m.h  |  2 +-
 xen/arch/x86/mm/hap/hap.c       |  6 +++---
 xen/arch/x86/mm/p2m-basic.c     | 18 ++++++++++++++----
 xen/arch/x86/mm/shadow/common.c |  4 ++--
 4 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/xen/arch/x86/include/asm/p2m.h b/xen/arch/x86/include/asm/p2m.h
index 0a0f7114f3..bafbd96052 100644
--- a/xen/arch/x86/include/asm/p2m.h
+++ b/xen/arch/x86/include/asm/p2m.h
@@ -600,7 +600,7 @@ int p2m_init(struct domain *d);
 int p2m_alloc_table(struct p2m_domain *p2m);
 
 /* Return all the p2m resources to Xen. */
-void p2m_teardown(struct p2m_domain *p2m);
+void p2m_teardown(struct p2m_domain *p2m, bool remove_root);
 void p2m_final_teardown(struct domain *d);
 
 /* Add/remove a page to/from a domain's p2m table. */
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 79929774e8..9e0b725c59 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -541,18 +541,18 @@ void hap_final_teardown(struct domain *d)
         }
 
         for ( i = 0; i < MAX_ALTP2M; i++ )
-            p2m_teardown(d->arch.altp2m_p2m[i]);
+            p2m_teardown(d->arch.altp2m_p2m[i], true);
     }
 
     /* Destroy nestedp2m's first */
     for (i = 0; i < MAX_NESTEDP2M; i++) {
-        p2m_teardown(d->arch.nested_p2m[i]);
+        p2m_teardown(d->arch.nested_p2m[i], true);
     }
 
     if ( d->arch.paging.hap.total_pages != 0 )
         hap_teardown(d, NULL);
 
-    p2m_teardown(p2m_get_hostp2m(d));
+    p2m_teardown(p2m_get_hostp2m(d), true);
     /* Free any memory that the p2m teardown released */
     paging_lock(d);
     hap_set_allocation(d, 0, NULL);
diff --git a/xen/arch/x86/mm/p2m-basic.c b/xen/arch/x86/mm/p2m-basic.c
index 9130fc2a70..3231aaa9ba 100644
--- a/xen/arch/x86/mm/p2m-basic.c
+++ b/xen/arch/x86/mm/p2m-basic.c
@@ -154,10 +154,10 @@ int p2m_init(struct domain *d)
  * hvm fixme: when adding support for pvh non-hardware domains, this path must
  * cleanup any foreign p2m types (release refcnts on them).
  */
-void p2m_teardown(struct p2m_domain *p2m)
+void p2m_teardown(struct p2m_domain *p2m, bool remove_root)
 {
 #ifdef CONFIG_HVM
-    struct page_info *pg;
+    struct page_info *pg, *root_pg = NULL;
     struct domain *d;
 
     if ( !p2m )
@@ -171,10 +171,20 @@ void p2m_teardown(struct p2m_domain *p2m)
     ASSERT(atomic_read(&d->shr_pages) == 0);
 #endif
 
-    p2m->phys_table = pagetable_null();
+    if ( remove_root )
+        p2m->phys_table = pagetable_null();
+    else if ( !pagetable_is_null(p2m->phys_table) )
+    {
+        root_pg = pagetable_get_page(p2m->phys_table);
+        clear_domain_page(pagetable_get_mfn(p2m->phys_table));
+    }
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )
-        d->arch.paging.free_page(d, pg);
+        if ( pg != root_pg )
+            d->arch.paging.free_page(d, pg);
+
+    if ( root_pg )
+        page_list_add(root_pg, &p2m->pages);
 
     p2m_unlock(p2m);
 #endif
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 0247f0c84e..3e1e43a389 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2707,7 +2707,7 @@ int shadow_enable(struct domain *d, u32 mode)
  out_unlocked:
 #ifdef CONFIG_HVM
     if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
-        p2m_teardown(p2m);
+        p2m_teardown(p2m, true);
 #endif
     if ( rv != 0 && pg != NULL )
     {
@@ -2873,7 +2873,7 @@ void shadow_final_teardown(struct domain *d)
         shadow_teardown(d, NULL);
 
     /* It is now safe to pull down the p2m map. */
-    p2m_teardown(p2m_get_hostp2m(d));
+    p2m_teardown(p2m_get_hostp2m(d), true);
     /* Free any shadow memory that the p2m teardown released */
     paging_lock(d);
     shadow_set_allocation(d, 0, NULL);
--
generated by git-patchbot for /home/xen/git/xen.git#master