[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v7 5/7] xen: re-define assign_pages and introduce a new function assign_page
In order to deal with the trouble of count-to-order conversion when the number of pages is not a power of two, this commit re-defines assign_pages to take a count of pages (nr) and introduces assign_page for the original case of a single, power-of-two order. To reduce backporting confusion, the order of the assign_pages parameters is altered, so that the compiler will point out that adjustments at call sites are needed. Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx> --- xen/arch/x86/pv/dom0_build.c | 2 +- xen/common/grant_table.c | 2 +- xen/common/memory.c | 6 +++--- xen/common/page_alloc.c | 23 ++++++++++++++--------- xen/include/xen/mm.h | 6 ++++++ 5 files changed, 25 insertions(+), 14 deletions(-) diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c index d7f9e04b28..77efd3918c 100644 --- a/xen/arch/x86/pv/dom0_build.c +++ b/xen/arch/x86/pv/dom0_build.c @@ -568,7 +568,7 @@ int __init dom0_construct_pv(struct domain *d, else { while ( count-- ) - if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 0, 0) ) + if ( assign_pages(mfn_to_page(_mfn(mfn++)), 1, d, 0) ) BUG(); } initrd->mod_end = 0; diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c index e80f8d044d..fe1fc11b22 100644 --- a/xen/common/grant_table.c +++ b/xen/common/grant_table.c @@ -2358,7 +2358,7 @@ gnttab_transfer( * is respected and speculative execution is blocked accordingly */ if ( unlikely(!evaluate_nospec(okay)) || - unlikely(assign_pages(e, page, 0, MEMF_no_refcount)) ) + unlikely(assign_pages(page, 1, e, MEMF_no_refcount)) ) { bool drop_dom_ref; diff --git a/xen/common/memory.c b/xen/common/memory.c index 74babb0bd7..63642278fd 100644 --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -728,8 +728,8 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) /* Assign each output page to the domain. 
*/ for ( j = 0; (page = page_list_remove_head(&out_chunk_list)); ++j ) { - if ( assign_pages(d, page, exch.out.extent_order, - MEMF_no_refcount) ) + if ( assign_page(page, exch.out.extent_order, d, + MEMF_no_refcount) ) { unsigned long dec_count; bool_t drop_dom_ref; @@ -797,7 +797,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg) * cleared PGC_allocated. */ while ( (page = page_list_remove_head(&in_chunk_list)) ) - if ( assign_pages(d, page, 0, MEMF_no_refcount) ) + if ( assign_pages(page, 1, d, MEMF_no_refcount) ) { BUG_ON(!d->is_dying); free_domheap_page(page); diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index ba7adc80db..2aa8edac8c 100644 --- a/xen/common/page_alloc.c +++ b/xen/common/page_alloc.c @@ -2259,9 +2259,9 @@ void init_domheap_pages(paddr_t ps, paddr_t pe) int assign_pages( - struct domain *d, struct page_info *pg, - unsigned int order, + unsigned long nr, + struct domain *d, unsigned int memflags) { int rc = 0; @@ -2281,7 +2281,7 @@ int assign_pages( { unsigned int extra_pages = 0; - for ( i = 0; i < (1ul << order); i++ ) + for ( i = 0; i < nr; i++ ) { ASSERT(!(pg[i].count_info & ~PGC_extra)); if ( pg[i].count_info & PGC_extra ) @@ -2290,18 +2290,18 @@ int assign_pages( ASSERT(!extra_pages || ((memflags & MEMF_no_refcount) && - extra_pages == 1u << order)); + extra_pages == nr)); } #endif if ( pg[0].count_info & PGC_extra ) { - d->extra_pages += 1u << order; + d->extra_pages += nr; memflags &= ~MEMF_no_refcount; } else if ( !(memflags & MEMF_no_refcount) ) { - unsigned int tot_pages = domain_tot_pages(d) + (1 << order); + unsigned int tot_pages = domain_tot_pages(d) + nr; if ( unlikely(tot_pages > d->max_pages) ) { @@ -2313,10 +2313,10 @@ int assign_pages( } if ( !(memflags & MEMF_no_refcount) && - unlikely(domain_adjust_tot_pages(d, 1 << order) == (1 << order)) ) + unlikely(domain_adjust_tot_pages(d, nr) == nr) ) get_knownalive_domain(d); - for ( i = 0; i < (1 << order); i++ ) + for ( i = 0; i < 
nr; i++ ) { ASSERT(page_get_owner(&pg[i]) == NULL); page_set_owner(&pg[i], d); @@ -2331,6 +2331,11 @@ int assign_pages( return rc; } +int assign_page(struct page_info *pg, unsigned int order, struct domain *d, + unsigned int memflags) +{ + return assign_pages(pg, 1UL << order, d, memflags); +} struct page_info *alloc_domheap_pages( struct domain *d, unsigned int order, unsigned int memflags) @@ -2373,7 +2378,7 @@ struct page_info *alloc_domheap_pages( pg[i].count_info = PGC_extra; } } - if ( assign_pages(d, pg, order, memflags) ) + if ( assign_page(pg, order, d, memflags) ) { free_heap_pages(pg, order, memflags & MEMF_no_scrub); return NULL; diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h index 8e8fb5a615..9d6a45174e 100644 --- a/xen/include/xen/mm.h +++ b/xen/include/xen/mm.h @@ -132,9 +132,15 @@ int query_page_offline(mfn_t mfn, uint32_t *status); void heap_init_late(void); int assign_pages( + struct page_info *pg, + unsigned long nr, struct domain *d, + unsigned int memflags); + +int assign_page( struct page_info *pg, unsigned int order, + struct domain *d, unsigned int memflags); /* Dump info to serial console */ -- 2.25.1
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |