[PATCH v3 1/4] x86/shim: map and unmap page tables in replace_va_mapping
From: Wei Liu <wei.liu2@xxxxxxxxxx>
Also, introduce lYe_from_lXe() macros which do not rely on the direct
map when walking page tables. Unfortunately, they cannot be inline
functions due to the header dependency on domain_page.h, so keep them as
macros just like map_lYt_from_lXe().
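
For illustration, a minimal sketch of the difference in walking style
(the declarations mirror replace_va_mapping() below; nothing here is
new API):

    l4_pgentry_t *pl4e = l4start + l4_table_offset(va);

    /* Old style: l4e_to_l3e() returns a live pointer into the direct map. */
    l3_pgentry_t *pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(va);

    /* New style: map the L3 table, copy the entry by value, unmap again. */
    l3_pgentry_t l3e = l3e_from_l4e(*pl4e, l3_table_offset(va));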
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>
---
Changed in v3:
- use unmap_domain_page() instead of the macro in several places.
- also introduce l1e_from_l2e().
- add _ prefix in macros to avoid aliasing.
Changed in v2:
- instead of map, map, map, read/write, unmap, unmap, unmap, do a
  map, read PTE, unmap sequence at each level in turn (see the sketch
  after this list).
- use lYe_from_lXe() macros and lift them from a later patch to this
patch.
- const qualify pointers in new macros.
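
A minimal sketch of the two orderings described above (illustrative
only; l4e, va and the offsets stand in for the caller's context):

    /* v1: all levels mapped at once, unmapped at the end. */
    l3_pgentry_t *l3t = map_l3t_from_l4e(l4e);
    l2_pgentry_t *l2t = map_l2t_from_l3e(l3t[l3_table_offset(va)]);
    /* ... read/write ... */
    unmap_domain_page(l2t);
    unmap_domain_page(l3t);

    /* v2: at most one table mapped at a time. */
    l3_pgentry_t l3e = l3e_from_l4e(l4e, l3_table_offset(va));
    l2_pgentry_t l2e = l2e_from_l3e(l3e, l2_table_offset(va));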
---
xen/arch/x86/pv/shim.c | 9 +++++----
xen/include/asm-x86/page.h | 19 +++++++++++++++++++
2 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index ed2ece8a8a..31264582cc 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -168,16 +168,17 @@ const struct platform_bad_page *__init pv_shim_reserved_pages(unsigned int *size
 static void __init replace_va_mapping(struct domain *d, l4_pgentry_t *l4start,
                                       unsigned long va, mfn_t mfn)
 {
-    l4_pgentry_t *pl4e = l4start + l4_table_offset(va);
-    l3_pgentry_t *pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(va);
-    l2_pgentry_t *pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(va);
-    l1_pgentry_t *pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
+    l4_pgentry_t l4e = l4start[l4_table_offset(va)];
+    l3_pgentry_t l3e = l3e_from_l4e(l4e, l3_table_offset(va));
+    l2_pgentry_t l2e = l2e_from_l3e(l3e, l2_table_offset(va));
+    l1_pgentry_t *pl1e = map_l1t_from_l2e(l2e) + l1_table_offset(va);
     struct page_info *page = mfn_to_page(l1e_get_mfn(*pl1e));
 
     put_page_and_type(page);
 
     *pl1e = l1e_from_mfn(mfn, (!is_pv_32bit_domain(d) ? L1_PROT
                                                       : COMPAT_L1_PROT));
+    unmap_domain_page(pl1e);
 }
 
 static void evtchn_reserve(struct domain *d, unsigned int port)
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index eb73a0fc23..d50989a357 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -197,6 +197,25 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
 #define map_l2t_from_l3e(x)        (l2_pgentry_t *)map_domain_page(l3e_get_mfn(x))
 #define map_l3t_from_l4e(x)        (l3_pgentry_t *)map_domain_page(l4e_get_mfn(x))
 
+/* Unlike lYe_to_lXe(), lXe_from_lYe() do not rely on the direct map. */
+#define l1e_from_l2e(_l2e, _offset) ({                      \
+        const l1_pgentry_t *_l1t = map_l1t_from_l2e(_l2e);  \
+        l1_pgentry_t _l1e = _l1t[_offset];                  \
+        unmap_domain_page(_l1t);                            \
+        _l1e; })
+
+#define l2e_from_l3e(_l3e, _offset) ({                      \
+        const l2_pgentry_t *_l2t = map_l2t_from_l3e(_l3e);  \
+        l2_pgentry_t _l2e = _l2t[_offset];                  \
+        unmap_domain_page(_l2t);                            \
+        _l2e; })
+
+#define l3e_from_l4e(_l4e, _offset) ({                      \
+        const l3_pgentry_t *_l3t = map_l3t_from_l4e(_l4e);  \
+        l3_pgentry_t _l3e = _l3t[_offset];                  \
+        unmap_domain_page(_l3t);                            \
+        _l3e; })
+
/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a) \
(((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
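
A usage note, not part of the patch: the new lXe_from_lYe() macros fit
read-only walks, while a writer such as replace_va_mapping() must keep
the leaf table mapped until the store has happened (sketch; va, l2e
and mfn stand in for the caller's context):

    /* Read-only: entry copied out, mapping dropped immediately. */
    l1_pgentry_t l1e = l1e_from_l2e(l2e, l1_table_offset(va));

    /* Writer: keep the mapping until the store is done. */
    l1_pgentry_t *pl1e = map_l1t_from_l2e(l2e) + l1_table_offset(va);
    *pl1e = l1e_from_mfn(mfn, L1_PROT);
    unmap_domain_page(pl1e);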
--
2.24.1.AMZN