[Xen-devel] [PATCH v9 5/7] memory: add check_get_page_from_gfn() as a wrapper...

...for some uses of get_page_from_gfn().

There are many occurrences of the following pattern in the code:

    q = <readonly look-up> ? P2M_ALLOC : P2M_UNSHARE;
    page = get_page_from_gfn(d, gfn, &p2mt, q);

    if ( p2m_is_paging(p2mt) )
    {
        if ( page )
            put_page(page);

        p2m_mem_paging_populate(d, gfn);
        return <-EAGAIN or equivalent>;
    }

    if ( (q & P2M_UNSHARE) && p2m_is_shared(p2mt) )
    {
        if ( page )
            put_page(page);

        return <-EAGAIN or equivalent>;
    }

    if ( !page )
        return <-EINVAL or equivalent>;

There are some small differences between the exact way the occurrences
are coded but the desired semantic is the same.

This patch introduces a new common implementation of this code in
check_get_page_from_gfn() and then converts the various open-coded
patterns into calls to this new function.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Roger Pau Monne <roger.pau@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>

v9:
 - Defer P2M type checks (beyond shared or paging) to the caller.

v7:
 - Fix ARM build by introducing p2m_is_readonly() predicate.
 - Re-name get_paged_frame() -> check_get_page_from_gfn().
 - Adjust default cases of callers switch-ing on return value.

v3:
 - Addressed comments from George.

v2:
 - New in v2.
---
 xen/arch/x86/hvm/emulate.c | 25 ++++++++++++-----------
 xen/arch/x86/hvm/hvm.c     | 14 +------------
 xen/common/grant_table.c   | 32 ++++++++++++++----------------
 xen/common/memory.c        | 49 ++++++++++++++++++++++++++++++++++++----------
 xen/include/asm-arm/p2m.h  |  4 ++++
 xen/include/asm-x86/p2m.h  |  3 +++
 6 files changed, 74 insertions(+), 53 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index a577685dc6..480840b202 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -356,22 +356,21 @@ static int hvmemul_acquire_page(unsigned long gmfn, struct page_info **page)
     struct domain *curr_d = current->domain;
     p2m_type_t p2mt;
 
-    *page = get_page_from_gfn(curr_d, gmfn, &p2mt, P2M_UNSHARE);
-
-    if ( *page == NULL )
-        return X86EMUL_UNHANDLEABLE;
-
-    if ( p2m_is_paging(p2mt) )
+    switch ( check_get_page_from_gfn(curr_d, _gfn(gmfn), false, &p2mt,
+                                     page) )
     {
-        put_page(*page);
-        p2m_mem_paging_populate(curr_d, gmfn);
-        return X86EMUL_RETRY;
-    }
+    case 0:
+        break;
 
-    if ( p2m_is_shared(p2mt) )
-    {
-        put_page(*page);
+    case -EAGAIN:
         return X86EMUL_RETRY;
+
+    default:
+        ASSERT_UNREACHABLE();
+        /* Fallthrough */
+
+    case -EINVAL:
+        return X86EMUL_UNHANDLEABLE;
     }
 
     /* This code should not be reached if the gmfn is not RAM */
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index fe6c9c592f..6bb1da07eb 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2536,20 +2536,8 @@ static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
     struct page_info *page;
     struct domain *d = current->domain;
 
-    page = get_page_from_gfn(d, gfn, &p2mt,
-                             writable ? P2M_UNSHARE : P2M_ALLOC);
-    if ( (p2m_is_shared(p2mt) && writable) || !page )
-    {
-        if ( page )
-            put_page(page);
-        return NULL;
-    }
-    if ( p2m_is_paging(p2mt) )
-    {
-        put_page(page);
-        p2m_mem_paging_populate(d, gfn);
+    if ( check_get_page_from_gfn(d, _gfn(gfn), !writable, &p2mt, &page) )
         return NULL;
-    }
 
     if ( writable )
     {
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 0f0b7b1a49..3604a8812c 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -374,25 +374,23 @@ static int get_paged_frame(unsigned long gfn, mfn_t *mfn,
                            struct page_info **page, bool readonly,
                            struct domain *rd)
 {
-    int rc = GNTST_okay;
     p2m_type_t p2mt;
+    int rc;
 
-    *mfn = INVALID_MFN;
-    *page = get_page_from_gfn(rd, gfn, &p2mt,
-                              readonly ? P2M_ALLOC : P2M_UNSHARE);
-    if ( !*page )
+    rc = check_get_page_from_gfn(rd, _gfn(gfn), readonly, &p2mt, page);
+    switch ( rc )
     {
-#ifdef P2M_SHARED_TYPES
-        if ( p2m_is_shared(p2mt) )
-            return GNTST_eagain;
-#endif
-#ifdef P2M_PAGES_TYPES
-        if ( p2m_is_paging(p2mt) )
-        {
-            p2m_mem_paging_populate(rd, gfn);
-            return GNTST_eagain;
-        }
-#endif
+    case 0:
+        break;
+
+    case -EAGAIN:
+        return GNTST_eagain;
+
+    default:
+        ASSERT_UNREACHABLE();
+        /* Fallthrough */
+
+    case -EINVAL:
         return GNTST_bad_page;
     }
 
@@ -406,7 +404,7 @@ static int get_paged_frame(unsigned long gfn, mfn_t *mfn,
 
     *mfn = page_to_mfn(*page);
 
-    return rc;
+    return GNTST_okay;
 }
 
 static inline void
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 85611ddae4..9b592d4f66 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1625,37 +1625,66 @@ void destroy_ring_for_helper(
     }
 }
 
-int prepare_ring_for_helper(
-    struct domain *d, unsigned long gmfn, struct page_info **_page,
-    void **_va)
+/*
+ * Acquire a pointer to struct page_info for a specified doman and GFN,
+ * checking whether the page has been paged out, or needs unsharing.
+ * If the function succeeds then zero is returned, page_p is written
+ * with a pointer to the struct page_info with a reference taken, and
+ * p2mt_p it is written with the P2M type of the page. The caller is
+ * responsible for dropping the reference.
+ * If the function fails then an appropriate errno is returned and the
+ * values referenced by page_p and p2mt_p are undefined.
+ */
+int check_get_page_from_gfn(struct domain *d, gfn_t gfn, bool readonly,
+                            p2m_type_t *p2mt_p, struct page_info **page_p)
 {
-    struct page_info *page;
+    p2m_query_t q = readonly ? P2M_ALLOC : P2M_UNSHARE;
     p2m_type_t p2mt;
-    void *va;
+    struct page_info *page;
 
-    page = get_page_from_gfn(d, gmfn, &p2mt, P2M_UNSHARE);
+    page = get_page_from_gfn(d, gfn_x(gfn), &p2mt, q);
 
 #ifdef CONFIG_HAS_MEM_PAGING
     if ( p2m_is_paging(p2mt) )
     {
         if ( page )
             put_page(page);
-        p2m_mem_paging_populate(d, gmfn);
-        return -ENOENT;
+
+        p2m_mem_paging_populate(d, gfn_x(gfn));
+        return -EAGAIN;
     }
 #endif
 #ifdef CONFIG_HAS_MEM_SHARING
-    if ( p2m_is_shared(p2mt) )
+    if ( (q & P2M_UNSHARE) && p2m_is_shared(p2mt) )
     {
         if ( page )
             put_page(page);
-        return -ENOENT;
+
+        return -EAGAIN;
     }
 #endif
 
     if ( !page )
         return -EINVAL;
 
+    *p2mt_p = p2mt;
+    *page_p = page;
+    return 0;
+}
+
+int prepare_ring_for_helper(
+    struct domain *d, unsigned long gmfn, struct page_info **_page,
+    void **_va)
+{
+    p2m_type_t p2mt;
+    struct page_info *page;
+    void *va;
+    int rc;
+
+    rc = check_get_page_from_gfn(d, _gfn(gmfn), false, &p2mt, &page);
+    if ( rc )
+        return (rc == -EAGAIN) ? -ENOENT : rc;
+
     if ( !get_page_type(page, PGT_writable_page) )
     {
         put_page(page);
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 8823707c17..377b591bc6 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -303,6 +303,10 @@ static inline struct page_info *get_page_from_gfn(
     return page;
 }
 
+int __must_check check_get_page_from_gfn(struct domain *d, gfn_t gfn,
+                                         bool readonly, p2m_type_t *p2mt_p,
+                                         struct page_info **page_p);
+
 int get_page_type(struct page_info *page, unsigned long type);
 bool is_iomem_page(mfn_t mfn);
 static inline int get_page_and_type(struct page_info *page,
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d4b3cfcb6e..8613df42ce 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -492,6 +492,9 @@ static inline struct page_info *get_page_from_gfn(
     return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;
 }
 
+int __must_check check_get_page_from_gfn(struct domain *d, gfn_t gfn,
+                                         bool readonly, p2m_type_t *p2mt_p,
+                                         struct page_info **page_p);
 
 /* General conversion function from mfn to gfn */
 static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
-- 
2.11.0
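
As a usage illustration (not part of the patch itself): a minimal sketch of how a caller might consume the new helper, in the same style as the converted call sites above. The function example_acquire_gfn(), the -ERESTART mapping of -EAGAIN and the p2m_is_ram() check are illustrative assumptions only; the patch deliberately defers any P2M type checks beyond paging/sharing to each caller.

    /* Hypothetical caller, for illustration only; not introduced by this patch. */
    static int example_acquire_gfn(struct domain *d, gfn_t gfn)
    {
        p2m_type_t p2mt;
        struct page_info *page;
        int rc;

        /* false => request a writable (unshared) reference; pass true for read-only. */
        rc = check_get_page_from_gfn(d, gfn, false, &p2mt, &page);
        switch ( rc )
        {
        case 0:
            break;

        case -EAGAIN:
            /* Paged out or still shared: have the caller retry. */
            return -ERESTART;

        default:
            ASSERT_UNREACHABLE();
            /* Fallthrough */

        case -EINVAL:
            return -EINVAL;
        }

        /* Any further P2M type policy is the caller's responsibility, e.g.: */
        if ( !p2m_is_ram(p2mt) )
        {
            put_page(page);
            return -EINVAL;
        }

        /* ... use the page ... */

        /* Drop the reference taken by check_get_page_from_gfn(). */
        put_page(page);

        return 0;
    }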