[Xen-devel] [PATCH v4] x86/mm: Aggregate get entry and populate into common funcs
The code for getting the entry and then populating was repeated in
p2m_change_altp2m_gfn() and in p2m_set_altp2m_mem_access(). That code now
lives in one place, with a bool parameter that lets the caller choose
whether to populate the altp2m after get_entry().

If remapping is being done, both the old and the new gfn should be
unshared in the hostp2m to keep things consistent. The page type of
old_gfn was already required to be p2m_ram_rw, bailing out otherwise, so
functionally this only simplifies things: a user no longer has to request
unsharing manually before remapping. If new_gfn is invalid, the hostp2m
should not be queried, since that is effectively a request to remove the
entry from the altp2m. But as that scenario is only used when removing
entries that were previously remapped/copied into the altp2m, those
entries have already gone through P2M_ALLOC | P2M_UNSHARE, so it has no
effect; the core function get_altp2m_entry() therefore calls
__get_gfn_type_access() with P2M_ALLOC | P2M_UNSHARE.

altp2m_get_entry_direct() is also called in p2m_set_suppress_ve() because,
on a new altp2m view, the function would otherwise fail with an invalid
mfn if p2m->set_entry() has not been called before.

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>

---
Changes since V3:
        - Switched over to George's version
---
 xen/arch/x86/mm/mem_access.c | 30 ++-----------
 xen/arch/x86/mm/p2m.c        | 84 ++++++++++++++++++++----------------
 xen/include/asm-x86/p2m.h    | 17 ++++++++
 3 files changed, 66 insertions(+), 65 deletions(-)

diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index a144bb0ce4..ddfe0169c0 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -262,35 +262,11 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
     mfn_t mfn;
     p2m_type_t t;
     p2m_access_t old_a;
-    unsigned int page_order;
-    unsigned long gfn_l = gfn_x(gfn);
     int rc;
 
-    mfn = ap2m->get_entry(ap2m, gfn, &t, &old_a, 0, NULL, NULL);
-
-    /* Check host p2m if no valid entry in alternate */
-    if ( !mfn_valid(mfn) )
-    {
-
-        mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
-                                    P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
-
-        rc = -ESRCH;
-        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
-            return rc;
-
-        /* If this is a superpage, copy that first */
-        if ( page_order != PAGE_ORDER_4K )
-        {
-            unsigned long mask = ~((1UL << page_order) - 1);
-            gfn_t gfn2 = _gfn(gfn_l & mask);
-            mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);
-
-            rc = ap2m->set_entry(ap2m, gfn2, mfn2, page_order, t, old_a, 1);
-            if ( rc )
-                return rc;
-        }
-    }
+    rc = altp2m_get_entry_prepopulate(ap2m, gfn, &mfn, &t, &old_a);
+    if ( rc )
+        return rc;
 
     /*
      * Inherit the old suppress #VE bit value if it is already set, or set it
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 9e81a30cc4..6f97650313 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -478,6 +478,43 @@ void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m)
     mm_write_unlock(&p2m->lock);
 }
 
+int get_altp2m_entry(struct p2m_domain *ap2m,
+                     gfn_t gfn, mfn_t *mfn, p2m_type_t *t,
+                     p2m_access_t *a, bool prepopulate)
+{
+    *mfn = ap2m->get_entry(ap2m, gfn, t, a, 0, NULL, NULL);
+
+    /* Check host p2m if no valid entry in alternate */
+    if ( !mfn_valid(*mfn) && !p2m_is_hostp2m(ap2m) )
+    {
+        struct p2m_domain *hp2m = p2m_get_hostp2m(ap2m->domain);
+        unsigned int page_order;
+        int rc;
+
+        *mfn = __get_gfn_type_access(hp2m, gfn_x(gfn), t, a,
+                                     P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
+
+        rc = -ESRCH;
+        if ( !mfn_valid(*mfn) || *t != p2m_ram_rw )
+            return rc;
+
+        /* If this is a superpage, copy that first */
+        if ( prepopulate && page_order != PAGE_ORDER_4K )
+        {
+            unsigned long mask = ~((1UL << page_order) - 1);
+            gfn_t gfn_aligned = _gfn(gfn_x(gfn) & mask);
+            mfn_t mfn_aligned = _mfn(mfn_x(*mfn) & mask);
+
+            rc = ap2m->set_entry(ap2m, gfn_aligned, mfn_aligned, page_order, *t, *a, 1);
+            if ( rc )
+                return rc;
+        }
+    }
+
+    return 0;
+}
+
+
 mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
                             p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                             unsigned int *page_order, bool_t locked)
@@ -2618,7 +2655,6 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
     p2m_access_t a;
     p2m_type_t t;
     mfn_t mfn;
-    unsigned int page_order;
     int rc = -EINVAL;
 
     if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
@@ -2630,47 +2666,21 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
     p2m_lock(hp2m);
     p2m_lock(ap2m);
 
-    mfn = ap2m->get_entry(ap2m, old_gfn, &t, &a, 0, NULL, NULL);
-
     if ( gfn_eq(new_gfn, INVALID_GFN) )
     {
+        mfn = ap2m->get_entry(ap2m, old_gfn, &t, &a, 0, NULL, NULL);
         if ( mfn_valid(mfn) )
             p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn), PAGE_ORDER_4K);
         rc = 0;
         goto out;
     }
 
-    /* Check host p2m if no valid entry in alternate */
-    if ( !mfn_valid(mfn) )
-    {
-        mfn = __get_gfn_type_access(hp2m, gfn_x(old_gfn), &t, &a,
-                                    P2M_ALLOC, &page_order, 0);
-
-        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
-            goto out;
-
-        /* If this is a superpage, copy that first */
-        if ( page_order != PAGE_ORDER_4K )
-        {
-            gfn_t gfn;
-            unsigned long mask;
-
-            mask = ~((1UL << page_order) - 1);
-            gfn = _gfn(gfn_x(old_gfn) & mask);
-            mfn = _mfn(mfn_x(mfn) & mask);
-
-            if ( ap2m->set_entry(ap2m, gfn, mfn, page_order, t, a, 1) )
-                goto out;
-        }
-    }
-
-    mfn = ap2m->get_entry(ap2m, new_gfn, &t, &a, 0, NULL, NULL);
-
-    if ( !mfn_valid(mfn) )
-        mfn = hp2m->get_entry(hp2m, new_gfn, &t, &a, 0, NULL, NULL);
+    rc = altp2m_get_entry_prepopulate(ap2m, old_gfn, &mfn, &t, &a);
+    if ( rc )
+        goto out;
 
-    /* Note: currently it is not safe to remap to a shared entry */
-    if ( !mfn_valid(mfn) || (t != p2m_ram_rw) )
+    rc = altp2m_get_entry_direct(ap2m, new_gfn, &mfn, &t, &a);
+    if ( rc )
         goto out;
 
     if ( !ap2m->set_entry(ap2m, old_gfn, mfn, PAGE_ORDER_4K, t, a,
@@ -3002,12 +3012,10 @@ int p2m_set_suppress_ve(struct domain *d, gfn_t gfn, bool suppress_ve,
     if ( ap2m )
         p2m_lock(ap2m);
 
-    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
-    if ( !mfn_valid(mfn) )
-    {
-        rc = -ESRCH;
+    rc = altp2m_get_entry_direct(p2m, gfn, &mfn, &t, &a);
+
+    if ( rc )
         goto out;
-    }
 
     rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a,
                         suppress_ve);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 2801a8ccca..7b9f035bb3 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -514,6 +514,23 @@ static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
         return mfn_x(mfn);
 }
 
+int get_altp2m_entry(struct p2m_domain *ap2m, gfn_t gfn, mfn_t *mfn,
+                     p2m_type_t *t, p2m_access_t *a, bool prepopulate);
+
+static inline int altp2m_get_entry_direct(struct p2m_domain *ap2m,
+                                          gfn_t gfn, mfn_t *mfn,
+                                          p2m_type_t *t, p2m_access_t *a)
+{
+    return get_altp2m_entry(ap2m, gfn, mfn, t, a, false);
+}
+
+static inline int altp2m_get_entry_prepopulate(struct p2m_domain *ap2m,
+                                               gfn_t gfn, mfn_t *mfn,
+                                               p2m_type_t *t, p2m_access_t *a)
+{
+    return get_altp2m_entry(ap2m, gfn, mfn, t, a, true);
+}
+
 /* Deadlock-avoidance scheme when calling get_gfn on different gfn's */
 struct two_gfns {
     struct domain *first_domain, *second_domain;
-- 
2.17.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
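
A minimal usage sketch of the two wrappers introduced above, assuming a
caller inside xen/arch/x86/mm/ (where the private mm-locks.h is available);
the function name, the altp2m index handling, and the final set_entry()
call are illustrative assumptions, not part of the patch:

/*
 * Illustrative sketch only -- not part of the patch.  The caller, its name
 * and the trailing set_entry() call are assumptions for demonstration.
 */
#include <xen/sched.h>
#include <asm/p2m.h>

#include "mm-locks.h"   /* p2m_lock()/p2m_unlock(); private to arch/x86/mm */

static int lookup_in_altp2m_view(struct domain *d, unsigned int idx, gfn_t gfn)
{
    struct p2m_domain *ap2m = d->arch.altp2m_p2m[idx];
    p2m_access_t a;
    p2m_type_t t;
    mfn_t mfn;
    int rc;

    p2m_lock(ap2m);

    /*
     * Look the gfn up in the altp2m view, falling back to the host p2m
     * (P2M_ALLOC | P2M_UNSHARE) if the view has no valid entry.  Use
     * altp2m_get_entry_prepopulate() instead when a superpage found in
     * the host p2m should also be copied into the view first.
     */
    rc = altp2m_get_entry_direct(ap2m, gfn, &mfn, &t, &a);
    if ( !rc )
        /* mfn/t/a now describe the entry; e.g. re-insert it at 4k with the
         * suppress-#VE bit set. */
        rc = ap2m->set_entry(ap2m, gfn, mfn, PAGE_ORDER_4K, t, a, 1);

    p2m_unlock(ap2m);

    return rc;
}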