[Xen-changelog] [xen master] x86/dom0: take alignment into account when populating p2m in PVH mode
commit ae7fc10d2ca5c22e04b8a28becbd1fbf8b44e83a
Author:     Roger Pau Monne <roger.pau@xxxxxxxxxx>
AuthorDate: Fri Dec 28 12:18:56 2018 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Fri Dec 28 14:32:57 2018 +0000

    x86/dom0: take alignment into account when populating p2m in PVH mode

    Current code that allocates memory and populates the p2m for PVH Dom0
    doesn't take the address alignment into account, this can lead to high
    order allocations that start on a non-aligned address to be broken
    down into lower order entries on the p2m page tables.

    Fix this by taking into account the p2m page sizes and alignment
    requirements when allocating the memory and populating the p2m.

    Reported-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Tested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/dom0_build.c | 49 ++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 44 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 7ea29c443a..5ae3a32060 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -91,16 +91,55 @@ static int __init pvh_populate_memory_range(struct domain *d,
                                             unsigned long start,
                                             unsigned long nr_pages)
 {
-    unsigned int order = MAX_ORDER, i = 0;
+    struct {
+        unsigned long align;
+        unsigned int order;
+    } static const __initconst orders[] = {
+        /* NB: must be sorted by decreasing size. */
+        { .align = PFN_DOWN(GB(1)), .order = PAGE_ORDER_1G },
+        { .align = PFN_DOWN(MB(2)), .order = PAGE_ORDER_2M },
+        { .align = PFN_DOWN(KB(4)), .order = PAGE_ORDER_4K },
+    };
+    unsigned int max_order = MAX_ORDER, i = 0;
     struct page_info *page;
     int rc;
 #define MAP_MAX_ITER 64
 
     while ( nr_pages != 0 )
     {
-        unsigned int range_order = get_order_from_pages(nr_pages + 1);
+        unsigned int order, j;
+        unsigned long end;
+
+        /* Search for the largest page size which can fulfil this request. */
+        for ( j = 0; j < ARRAY_SIZE(orders); j++ )
+            if ( IS_ALIGNED(start, orders[j].align) &&
+                 nr_pages >= (1UL << orders[j].order) )
+                break;
+
+        switch ( j )
+        {
+        case ARRAY_SIZE(orders):
+            printk("Unable to find allocation order for [%#lx,%#lx)\n",
+                   start, start + nr_pages);
+            return -EINVAL;
+
+        case 0:
+            /* Highest order, aim to allocate until the end of the region. */
+            end = (start + nr_pages) & ~(orders[0].align - 1);
+            break;
+
+        default:
+            /*
+             * Aim to allocate until the higher next order alignment or the
+             * end of the region.
+             */
+            end = min(ROUNDUP(start + 1, orders[j - 1].align),
+                      start + nr_pages);
+            break;
+        }
 
-        order = min(range_order ? range_order - 1 : 0, order);
+        order = get_order_from_pages(end - start + 1);
+        order = min(order ? order - 1 : 0, max_order);
         page = alloc_domheap_pages(d, order, dom0_memflags | MEMF_no_scrub);
         if ( page == NULL )
         {
@@ -108,7 +147,7 @@ static int __init pvh_populate_memory_range(struct domain *d,
             {
                 /* Try again without any dom0_memflags. */
                 dom0_memflags = 0;
-                order = MAX_ORDER;
+                max_order = MAX_ORDER;
                 continue;
             }
             if ( order == 0 )
@@ -116,7 +155,7 @@ static int __init pvh_populate_memory_range(struct domain *d,
                 printk("Unable to allocate memory with order 0!\n");
                 return -ENOMEM;
             }
-            order--;
+            max_order = order - 1;
             continue;
         }
 
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
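For readers who want to experiment with the new allocation strategy outside of Xen, below is a minimal, self-contained C sketch of the order-selection step the patch introduces: walk a table sorted by decreasing page size and take the first order whose alignment and size requirements the current chunk satisfies. The PFN_DOWN/GB/MB/KB/IS_ALIGNED macros, the PAGE_ORDER_* values and the pick_order() helper are local stand-ins written purely for illustration, not Xen's definitions; only the orders[] table mirrors the one added to pvh_populate_memory_range().

/*
 * Standalone sketch (not Xen code): pick the largest usable p2m order for a
 * memory chunk, using the same alignment/size test as the patch above.
 * All macros below are re-implemented here for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

#define KB(x)            ((uint64_t)(x) << 10)
#define MB(x)            ((uint64_t)(x) << 20)
#define GB(x)            ((uint64_t)(x) << 30)
#define PAGE_SHIFT       12
#define PFN_DOWN(x)      ((x) >> PAGE_SHIFT)          /* bytes -> page frame number */
#define IS_ALIGNED(v, a) (((v) & ((a) - 1)) == 0)
#define ARRAY_SIZE(a)    (sizeof(a) / sizeof((a)[0]))

#define PAGE_ORDER_4K    0   /* 2^0  pages = 4K */
#define PAGE_ORDER_2M    9   /* 2^9  pages = 2M */
#define PAGE_ORDER_1G    18  /* 2^18 pages = 1G */

static const struct {
    unsigned long align;   /* required alignment of the start pfn */
    unsigned int order;    /* matching p2m page order */
} orders[] = {
    /* NB: must be sorted by decreasing size, as in the patch. */
    { PFN_DOWN(GB(1)), PAGE_ORDER_1G },
    { PFN_DOWN(MB(2)), PAGE_ORDER_2M },
    { PFN_DOWN(KB(4)), PAGE_ORDER_4K },
};

/* Largest order whose alignment and size fit [start, start + nr_pages). */
static int pick_order(unsigned long start, unsigned long nr_pages)
{
    unsigned int j;

    for ( j = 0; j < ARRAY_SIZE(orders); j++ )
        if ( IS_ALIGNED(start, orders[j].align) &&
             nr_pages >= (1UL << orders[j].order) )
            return orders[j].order;

    return -1; /* mirrors the defensive -EINVAL path in the patch */
}

int main(void)
{
    /* 2M-aligned but not 1G-aligned start: 2M superpages get used. */
    printf("pfn 0x200,   0x40200 pages -> order %d\n", pick_order(0x200, 0x40200));
    /* 1G-aligned start with enough pages left: 1G superpages get used. */
    printf("pfn 0x40000, 0x40200 pages -> order %d\n", pick_order(0x40000, 0x40200));
    /* Unaligned start (or short remainder) falls back to 4K pages. */
    printf("pfn 0x201,   0x1ff   pages -> order %d\n", pick_order(0x201, 0x1ff));
    return 0;
}

As in the patch, the table has to stay sorted from largest to smallest page size so that the loop's first match is the biggest order that can be used; an unaligned start or a remainder smaller than a superpage naturally falls through to 4K mappings. The patch additionally caps the chosen chunk at the next higher alignment boundary (or the end of the region), so that subsequent iterations become eligible for larger orders again.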