[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen staging] x86: introduce a new set of APIs to manage Xen page tables

commit cb6a6e9527c0f87708dce9facb74d11f0169992b
Author:     Wei Liu <wei.liu2@xxxxxxxxxx>
AuthorDate: Tue Jan 28 13:50:05 2020 +0000
Commit:     Wei Liu <wl@xxxxxxx>
CommitDate: Thu Feb 20 12:45:32 2020 +0000

    x86: introduce a new set of APIs to manage Xen page tables
    We are going to switch to using domheap pages for page tables.
    A new set of APIs is introduced to allocate and free pages of page
    tables based on mfn instead of the xenheap direct map address. The
    allocation and deallocation work on mfn_t but not page_info, because
    they are required to work even before frame table is set up.
    Implement the old functions with the new ones. We will rewrite, site
    by site, other mm functions that manipulate page tables to use the new
    APIs.
    After the allocation, one needs to map and unmap via map_domain_page to
    access the PTEs. This does not break xen half way, since the new APIs
    still use xenheap pages underneath, and map_domain_page will just use
    the directmap for mappings. They will be switched to use domheap and
    dynamic mappings when usage of old APIs is eliminated.
    No functional change intended in this patch.
    Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>
    Reviewed-by: Julien Grall <jgrall@xxxxxxxxxx>
    Reviewed-by: Wei Liu <wl@xxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
 xen/arch/x86/mm.c        | 32 +++++++++++++++++++++++++++-----
 xen/include/asm-x86/mm.h |  3 +++
 2 files changed, 30 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index edc238e51a..70b87c4830 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -120,6 +120,7 @@
 #include <xen/efi.h>
 #include <xen/grant_table.h>
 #include <xen/hypercall.h>
+#include <xen/mm.h>
 #include <asm/paging.h>
 #include <asm/shadow.h>
 #include <asm/page.h>
@@ -4945,21 +4946,42 @@ int mmcfg_intercept_write(
 void *alloc_xen_pagetable(void)
+    mfn_t mfn = alloc_xen_pagetable_new();
+    return mfn_eq(mfn, INVALID_MFN) ? NULL : mfn_to_virt(mfn_x(mfn));
+void free_xen_pagetable(void *v)
+    mfn_t mfn = v ? virt_to_mfn(v) : INVALID_MFN;
+    free_xen_pagetable_new(mfn);
+ * For these PTE APIs, the caller must follow the alloc-map-unmap-free
+ * lifecycle, which means explicitly mapping the PTE pages before accessing
+ * them. The caller must check whether the allocation has succeeded, and only
+ * pass valid MFNs to map_domain_page().
+ */
+mfn_t alloc_xen_pagetable_new(void)
     if ( system_state != SYS_STATE_early_boot )
         void *ptr = alloc_xenheap_page();
         BUG_ON(!hardware_domain && !ptr);
-        return ptr;
+        return ptr ? virt_to_mfn(ptr) : INVALID_MFN;
-    return mfn_to_virt(mfn_x(alloc_boot_pages(1, 1)));
+    return alloc_boot_pages(1, 1);
-void free_xen_pagetable(void *v)
+/* mfn can be INVALID_MFN */
+void free_xen_pagetable_new(mfn_t mfn)
-    if ( system_state != SYS_STATE_early_boot )
-        free_xenheap_page(v);
+    if ( system_state != SYS_STATE_early_boot && !mfn_eq(mfn, INVALID_MFN) )
+        free_xenheap_page(mfn_to_virt(mfn_x(mfn)));
 static DEFINE_SPINLOCK(map_pgdir_lock);
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index fafb3af46d..a06b2fb81f 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -583,6 +583,9 @@ void *do_page_walk(struct vcpu *v, unsigned long addr);
 /* Allocator functions for Xen pagetables. */
 void *alloc_xen_pagetable(void);
 void free_xen_pagetable(void *v);
+mfn_t alloc_xen_pagetable_new(void);
+void free_xen_pagetable_new(mfn_t mfn);
 l1_pgentry_t *virt_to_xen_l1e(unsigned long v);
 int __sync_local_execstate(void);
generated by git-patchbot for /home/xen/git/xen.git#staging

Xen-changelog mailing list



Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.