[xen master] x86/shim: map and unmap page tables in replace_va_mapping



commit 6c8afe5aadb33761431b24157d99b25eac15fc7e
Author:     Wei Liu <wei.liu2@xxxxxxxxxx>
AuthorDate: Thu Apr 16 11:01:46 2020 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Apr 16 11:01:46 2020 +0200

    x86/shim: map and unmap page tables in replace_va_mapping
    
    Also, introduce lYe_from_lXe() macros which do not rely on the direct
    map when walking page tables. Unfortunately, they cannot be inline
    functions due to the header dependency on domain_page.h, so keep them as
    macros just like map_lYt_from_lXe().
    
    Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/pv/shim.c     |  9 +++++----
 xen/include/asm-x86/page.h | 19 +++++++++++++++++++
 2 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index ed2ece8a8a..31264582cc 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -168,16 +168,17 @@ const struct platform_bad_page *__init pv_shim_reserved_pages(unsigned int *size
 static void __init replace_va_mapping(struct domain *d, l4_pgentry_t *l4start,
                                       unsigned long va, mfn_t mfn)
 {
-    l4_pgentry_t *pl4e = l4start + l4_table_offset(va);
-    l3_pgentry_t *pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(va);
-    l2_pgentry_t *pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(va);
-    l1_pgentry_t *pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
+    l4_pgentry_t l4e = l4start[l4_table_offset(va)];
+    l3_pgentry_t l3e = l3e_from_l4e(l4e, l3_table_offset(va));
+    l2_pgentry_t l2e = l2e_from_l3e(l3e, l2_table_offset(va));
+    l1_pgentry_t *pl1e = map_l1t_from_l2e(l2e) + l1_table_offset(va);
     struct page_info *page = mfn_to_page(l1e_get_mfn(*pl1e));
 
     put_page_and_type(page);
 
     *pl1e = l1e_from_mfn(mfn, (!is_pv_32bit_domain(d) ? L1_PROT
                                                       : COMPAT_L1_PROT));
+    unmap_domain_page(pl1e);
 }
 
 static void evtchn_reserve(struct domain *d, unsigned int port)
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index eb73a0fc23..5acf3d3d5a 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -197,6 +197,25 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
 #define map_l2t_from_l3e(x)        (l2_pgentry_t *)map_domain_page(l3e_get_mfn(x))
 #define map_l3t_from_l4e(x)        (l3_pgentry_t *)map_domain_page(l4e_get_mfn(x))
 
+/* Unlike lYe_to_lXe(), lXe_from_lYe() do not rely on the direct map. */
+#define l1e_from_l2e(l2e_, offset_) ({                      \
+        const l1_pgentry_t *l1t_ = map_l1t_from_l2e(l2e_);  \
+        l1_pgentry_t l1e_ = l1t_[offset_];                  \
+        unmap_domain_page(l1t_);                            \
+        l1e_; })
+
+#define l2e_from_l3e(l3e_, offset_) ({                      \
+        const l2_pgentry_t *l2t_ = map_l2t_from_l3e(l3e_);  \
+        l2_pgentry_t l2e_ = l2t_[offset_];                  \
+        unmap_domain_page(l2t_);                            \
+        l2e_; })
+
+#define l3e_from_l4e(l4e_, offset_) ({                      \
+        const l3_pgentry_t *l3t_ = map_l3t_from_l4e(l4e_);  \
+        l3_pgentry_t l3e_ = l3t_[offset_];                  \
+        unmap_domain_page(l3t_);                            \
+        l3e_; })
+
 /* Given a virtual address, get an entry offset into a page table. */
 #define l1_table_offset(a)         \
     (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
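
For illustration only (not part of the commit above): a minimal sketch of how
the new lYe_from_lXe() accessors compose when walking from an L4 table down to
an L1 entry, in the same way replace_va_mapping() now does. The helper name
read_l1e_of_va() is hypothetical, and the sketch assumes every intermediate
level is present and points at a page table.

static l1_pgentry_t __init read_l1e_of_va(const l4_pgentry_t *l4start,
                                          unsigned long va)
{
    /*
     * Each lYe_from_lXe() maps the next-level table, reads a single entry
     * and unmaps the table again, so no pointer into the direct map is held
     * across levels.
     */
    l4_pgentry_t l4e = l4start[l4_table_offset(va)];
    l3_pgentry_t l3e = l3e_from_l4e(l4e, l3_table_offset(va));
    l2_pgentry_t l2e = l2e_from_l3e(l3e, l2_table_offset(va));

    /* The final level uses l1e_from_l2e(), introduced by the same hunk. */
    return l1e_from_l2e(l2e, l1_table_offset(va));
}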
--
generated by git-patchbot for /home/xen/git/xen.git#master