[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v5 13/15] memory: add get_paged_gfn() as a wrapper...
...for some uses of get_page_from_gfn(). There are many occurrences of the following pattern in the code: q = <readonly look-up> ? P2M_ALLOC : P2M_UNSHARE; page = get_page_from_gfn(d, gfn, &p2mt, q); if ( p2m_is_paging(p2mt) ) { if ( page ) put_page(page); p2m_mem_paging_populate(d, gfn); return <-EAGAIN or equivalent>; } if ( (q & P2M_UNSHARE) && p2m_is_shared(p2mt) ) { if ( page ) put_page(page); return <-EAGAIN or equivalent>; } if ( !page ) return <-EINVAL or equivalent>; if ( !p2m_is_ram(p2mt) || (!<readonly look-up> && p2m_is_readonly(p2mt)) ) { put_page(page); return <-EINVAL or equivalent>; } There are some small differences between the exact way the occurrences are coded but the desired semantic is the same. This patch introduces a new common implementation of this code in get_paged_gfn() and then converts the various open-coded patterns into calls to this new function. Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx> --- Cc: Jan Beulich <jbeulich@xxxxxxxx> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx> Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx> Cc: Julien Grall <julien.grall@xxxxxxx> Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx> Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx> Cc: Tim Deegan <tim@xxxxxxx> Cc: Wei Liu <wei.liu2@xxxxxxxxxx> v3: - Addressed comments from George. v2: - New in v2. 
--- xen/arch/x86/hvm/emulate.c | 32 ++++++-------------------- xen/arch/x86/hvm/hvm.c | 16 ++----------- xen/common/grant_table.c | 38 +++++++++---------------------- xen/common/memory.c | 56 +++++++++++++++++++++++++++++++++++++--------- xen/include/asm-arm/p2m.h | 3 +++ xen/include/asm-x86/p2m.h | 2 ++ 6 files changed, 71 insertions(+), 76 deletions(-) diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c index 8385c62145..c26281ea1c 100644 --- a/xen/arch/x86/hvm/emulate.c +++ b/xen/arch/x86/hvm/emulate.c @@ -332,34 +332,16 @@ static int hvmemul_do_io_buffer( static int hvmemul_acquire_page(unsigned long gmfn, struct page_info **page) { - struct domain *curr_d = current->domain; - p2m_type_t p2mt; - - *page = get_page_from_gfn(curr_d, gmfn, &p2mt, P2M_UNSHARE); - - if ( *page == NULL ) - return X86EMUL_UNHANDLEABLE; - - if ( p2m_is_paging(p2mt) ) - { - put_page(*page); - p2m_mem_paging_populate(curr_d, gmfn); - return X86EMUL_RETRY; - } - - if ( p2m_is_shared(p2mt) ) + switch ( get_paged_gfn(current->domain, _gfn(gmfn), false, NULL, page) ) { - put_page(*page); + case -EAGAIN: return X86EMUL_RETRY; - } - - /* This code should not be reached if the gmfn is not RAM */ - if ( p2m_is_mmio(p2mt) ) - { - domain_crash(curr_d); - - put_page(*page); + case -EINVAL: return X86EMUL_UNHANDLEABLE; + default: + ASSERT_UNREACHABLE(); + case 0: + break; } return X86EMUL_OKAY; diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 72c51faecb..03430e6f07 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -2557,24 +2557,12 @@ static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent, bool_t *writable) { void *map; - p2m_type_t p2mt; struct page_info *page; struct domain *d = current->domain; + p2m_type_t p2mt; - page = get_page_from_gfn(d, gfn, &p2mt, - writable ? 
P2M_UNSHARE : P2M_ALLOC); - if ( (p2m_is_shared(p2mt) && writable) || !page ) - { - if ( page ) - put_page(page); - return NULL; - } - if ( p2m_is_paging(p2mt) ) - { - put_page(page); - p2m_mem_paging_populate(d, gfn); + if ( get_paged_gfn(d, _gfn(gfn), !writable, &p2mt, &page) ) return NULL; - } if ( writable ) { diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c index 1840b656c9..cc7080bc98 100644 --- a/xen/common/grant_table.c +++ b/xen/common/grant_table.c @@ -366,39 +366,23 @@ static int get_paged_frame(unsigned long gfn, mfn_t *mfn, struct page_info **page, bool readonly, struct domain *rd) { - int rc = GNTST_okay; - p2m_type_t p2mt; - - *mfn = INVALID_MFN; - *page = get_page_from_gfn(rd, gfn, &p2mt, - readonly ? P2M_ALLOC : P2M_UNSHARE); - if ( !*page ) - { -#ifdef P2M_SHARED_TYPES - if ( p2m_is_shared(p2mt) ) - return GNTST_eagain; -#endif -#ifdef P2M_PAGES_TYPES - if ( p2m_is_paging(p2mt) ) - { - p2m_mem_paging_populate(rd, gfn); - return GNTST_eagain; - } -#endif - return GNTST_bad_page; - } + int rc; - if ( p2m_is_foreign(p2mt) ) + rc = get_paged_gfn(rd, _gfn(gfn), readonly, NULL, page); + switch ( rc ) { - put_page(*page); - *page = NULL; - + case -EAGAIN: + return GNTST_eagain; + case -EINVAL: return GNTST_bad_page; + default: + ASSERT_UNREACHABLE(); + case 0: + break; } *mfn = page_to_mfn(*page); - - return rc; + return GNTST_okay; } static inline void diff --git a/xen/common/memory.c b/xen/common/memory.c index ad7aa09a5c..4c34e6c2d9 100644 --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -1571,37 +1571,73 @@ void destroy_ring_for_helper( } } -int prepare_ring_for_helper( - struct domain *d, unsigned long gmfn, struct page_info **_page, - void **_va) +/* + * Acquire a pointer to struct page_info for a specified doman and GFN, + * checking whether the page has been paged out, or needs unsharing. 
+ * If the function succeeds then zero is returned and page_p is written + * with a pointer to the struct page_info with a reference taken. The + * caller is responsible for dropping the reference. If p2mt_p is non-NULL + * then it is also written with the P2M type of the page. + * If the function fails then an appropriate errno is returned and the + * values referenced by page_p and p2mt_p are undefined. + */ +int get_paged_gfn(struct domain *d, gfn_t gfn, bool readonly, + p2m_type_t *p2mt_p, struct page_info **page_p) { - struct page_info *page; + p2m_query_t q = readonly ? P2M_ALLOC : P2M_UNSHARE; p2m_type_t p2mt; - void *va; + struct page_info *page; - page = get_page_from_gfn(d, gmfn, &p2mt, P2M_UNSHARE); + page = get_page_from_gfn(d, gfn_x(gfn), &p2mt, q); #ifdef CONFIG_HAS_MEM_PAGING if ( p2m_is_paging(p2mt) ) { if ( page ) put_page(page); - p2m_mem_paging_populate(d, gmfn); - return -ENOENT; + + p2m_mem_paging_populate(d, gfn_x(gfn)); + return -EAGAIN; } #endif #ifdef CONFIG_HAS_MEM_SHARING - if ( p2m_is_shared(p2mt) ) + if ( (q & P2M_UNSHARE) && p2m_is_shared(p2mt) ) { if ( page ) put_page(page); - return -ENOENT; + + return -EAGAIN; } #endif if ( !page ) return -EINVAL; + if ( !p2m_is_ram(p2mt) || (!readonly && p2m_is_readonly(p2mt)) ) + { + put_page(page); + return -EINVAL; + } + + if ( p2mt_p ) + *p2mt_p = p2mt; + + *page_p = page; + return 0; +} + +int prepare_ring_for_helper( + struct domain *d, unsigned long gmfn, struct page_info **_page, + void **_va) +{ + struct page_info *page; + void *va; + int rc; + + rc = get_paged_gfn(d, _gfn(gmfn), false, NULL, &page); + if ( rc ) + return (rc == -EAGAIN) ? 
-ENOENT : rc; + if ( !get_page_type(page, PGT_writable_page) ) { put_page(page); diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h index 8823707c17..a39a4faabd 100644 --- a/xen/include/asm-arm/p2m.h +++ b/xen/include/asm-arm/p2m.h @@ -303,6 +303,9 @@ static inline struct page_info *get_page_from_gfn( return page; } +int get_paged_gfn(struct domain *d, gfn_t gfn, bool readonly, + p2m_type_t *p2mt_p, struct page_info **page_p); + int get_page_type(struct page_info *page, unsigned long type); bool is_iomem_page(mfn_t mfn); static inline int get_page_and_type(struct page_info *page, diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h index d4b3cfcb6e..e890bcd3e1 100644 --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -492,6 +492,8 @@ static inline struct page_info *get_page_from_gfn( return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL; } +int get_paged_gfn(struct domain *d, gfn_t gfn, bool readonly, + p2m_type_t *p2mt_p, struct page_info **page_p); /* General conversion function from mfn to gfn */ static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn) -- 2.11.0 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |