[Xen-devel] [PATCH 05/13] xen: simplify xen_set_identity_and_remap() by using global variables
xen_set_identity_and_remap() is used to prepare the remapping of memory
conflicting with the E820 map. It tracks the pfn at which to remap new
memory via a local variable, which is passed to a subfunction that in
turn returns the updated value. Additionally, the targeted maximum pfn
is passed as a parameter to subfunctions.

Simplify that construct by using just global variables in the source
file for that purpose. This will also make things simpler when we need
those values later.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 arch/x86/xen/setup.c | 63 +++++++++++++++++++++++++---------------------------
 1 file changed, 30 insertions(+), 33 deletions(-)
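The refactoring pattern here is independent of Xen: a value that every
helper in a single-threaded call chain reads and advances can live in
file-scope state instead of being threaded through parameters and
return values. A minimal standalone sketch of the before/after shape
(chunk_old(), chunk_new() and remap_cursor are illustrative names, not
code from this patch):

#include <stdio.h>

/*
 * Before: the remap cursor is passed down as a parameter and the
 * updated value is returned, so every caller must store it back.
 */
static unsigned long chunk_old(unsigned long size, unsigned long remap_pfn)
{
        /* ... remap `size` pages starting at remap_pfn ... */
        return remap_pfn + size;
}

/*
 * After: the cursor is file-scope state, as xen_remap_pfn is in
 * setup.c; helpers read it and advance it in place.
 */
static unsigned long remap_cursor;

static void chunk_new(unsigned long size)
{
        /* ... remap `size` pages starting at remap_cursor ... */
        remap_cursor += size;
}

int main(void)
{
        unsigned long cursor = 0x1000;

        cursor = chunk_old(16, cursor);         /* old style: store result */

        remap_cursor = 0x1000;                  /* new style: set once */
        chunk_new(16);                          /* helpers update in place */

        printf("%lx %lx\n", cursor, remap_cursor);      /* both: 1010 */
        return 0;
}

Dropping reentrancy is harmless here, since this path runs once on the
boot CPU during early boot, and it means later patches in the series
can read xen_remap_pfn and xen_max_pfn directly instead of plumbing
them through every signature.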
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index ab6c36e..0dda131 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -56,6 +56,9 @@ static struct {
 } xen_remap_buf __initdata __aligned(PAGE_SIZE);
 static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
 
+static unsigned long xen_remap_pfn;
+static unsigned long xen_max_pfn;
+
 /*
  * The maximum amount of extra memory compared to the base size. The
  * main scaling factor is the size of struct page. At extreme ratios
@@ -223,7 +226,7 @@ static int __init xen_free_mfn(unsigned long mfn)
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-	unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
+	unsigned long end_pfn, unsigned long *released)
 {
 	unsigned long pfn, end;
 	int ret;
@@ -231,7 +234,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
 	WARN_ON(start_pfn > end_pfn);
 
 	/* Release pages first. */
-	end = min(end_pfn, nr_pages);
+	end = min(end_pfn, xen_max_pfn);
 	for (pfn = start_pfn; pfn < end; pfn++) {
 		unsigned long mfn = pfn_to_mfn(pfn);
 
@@ -302,7 +305,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
  * its callers.
  */
 static void __init xen_do_set_identity_and_remap_chunk(
-	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
+	unsigned long start_pfn, unsigned long size)
 {
 	unsigned long buf = (unsigned long)&xen_remap_buf;
 	unsigned long mfn_save, mfn;
@@ -317,7 +320,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
 
 	mfn_save = virt_to_mfn(buf);
 
-	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
+	for (ident_pfn_iter = start_pfn, remap_pfn_iter = xen_remap_pfn;
 	     ident_pfn_iter < ident_end_pfn;
 	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
 		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;
@@ -350,17 +353,16 @@ static void __init xen_do_set_identity_and_remap_chunk(
  * This function takes a contiguous pfn range that needs to be identity mapped
  * and:
  *
- * 1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
+ * 1) Finds a new range of pfns to use to remap based on E820 and
+ *    xen_remap_pfn.
  * 2) Calls the do_ function to actually do the mapping/remapping work.
  *
  * The goal is to not allocate additional memory but to remap the existing
  * pages. In the case of an error the underlying memory is simply released back
  * to Xen and not remapped.
  */
-static unsigned long __init xen_set_identity_and_remap_chunk(
-	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
-	unsigned long remap_pfn, unsigned long *released,
-	unsigned long *remapped)
+static void __init xen_set_identity_and_remap_chunk(unsigned long start_pfn,
+	unsigned long end_pfn, unsigned long *released, unsigned long *remapped)
 {
 	unsigned long pfn;
 	unsigned long i = 0;
@@ -373,30 +375,30 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 		unsigned long remap_range_size;
 
 		/* Do not remap pages beyond the current allocation */
-		if (cur_pfn >= nr_pages) {
+		if (cur_pfn >= xen_max_pfn) {
 			/* Identity map remaining pages */
 			set_phys_range_identity(cur_pfn, cur_pfn + size);
 			break;
 		}
-		if (cur_pfn + size > nr_pages)
-			size = nr_pages - cur_pfn;
+		if (cur_pfn + size > xen_max_pfn)
+			size = xen_max_pfn - cur_pfn;
 
-		remap_range_size = xen_find_pfn_range(&remap_pfn);
+		remap_range_size = xen_find_pfn_range(&xen_remap_pfn);
 		if (!remap_range_size) {
 			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
 			xen_set_identity_and_release_chunk(cur_pfn,
-				cur_pfn + left, nr_pages, released);
+				cur_pfn + left, released);
 			break;
 		}
 		/* Adjust size to fit in current e820 RAM region */
 		if (size > remap_range_size)
 			size = remap_range_size;
 
-		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);
+		xen_do_set_identity_and_remap_chunk(cur_pfn, size);
 
 		/* Update variables to reflect new mappings. */
 		i += size;
-		remap_pfn += size;
+		xen_remap_pfn += size;
 		*remapped += size;
 	}
 
@@ -408,20 +410,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 		(void)HYPERVISOR_update_va_mapping(
 			(unsigned long)__va(pfn << PAGE_SHIFT),
 			mfn_pte(pfn, PAGE_KERNEL_IO), 0);
-
-	return remap_pfn;
 }
 
-static void __init xen_set_identity_and_remap(unsigned long nr_pages,
-	unsigned long *released, unsigned long *remapped)
+static void __init xen_set_identity_and_remap(unsigned long *released,
+	unsigned long *remapped)
 {
 	phys_addr_t start = 0;
-	unsigned long last_pfn = nr_pages;
 	const struct e820entry *entry = xen_e820_map;
 	unsigned long num_released = 0;
 	unsigned long num_remapped = 0;
 	int i;
 
+	xen_remap_pfn = xen_max_pfn;
+
 	/*
 	 * Combine non-RAM regions and gaps until a RAM region (or the
 	 * end of the map) is reached, then set the 1:1 map and
@@ -443,10 +444,8 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages,
 			end_pfn = PFN_UP(entry->addr);
 
 			if (start_pfn < end_pfn)
-				last_pfn = xen_set_identity_and_remap_chunk(
-						start_pfn, end_pfn, nr_pages,
-						last_pfn, &num_released,
-						&num_remapped);
+				xen_set_identity_and_remap_chunk(start_pfn,
+					end_pfn, &num_released, &num_remapped);
 			start = end;
 		}
 	}
@@ -600,7 +599,6 @@ static void __init xen_reserve_xen_mfnlist(void)
  **/
 char * __init xen_memory_setup(void)
 {
-	unsigned long max_pfn = xen_start_info->nr_pages;
 	phys_addr_t mem_end;
 	int rc;
 	struct xen_memory_map memmap;
@@ -610,8 +608,8 @@ char * __init xen_memory_setup(void)
 	int i;
 	int op;
 
-	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
-	mem_end = PFN_PHYS(max_pfn);
+	xen_max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
+	mem_end = PFN_PHYS(xen_max_pfn);
 
 	memmap.nr_entries = E820MAX;
 	set_xen_guest_handle(memmap.buffer, xen_e820_map);
@@ -650,15 +648,14 @@ char * __init xen_memory_setup(void)
 					  &xen_e820_map_entries);
 
 	max_pages = xen_get_max_pages();
-	if (max_pages > max_pfn)
-		extra_pages += max_pages - max_pfn;
+	if (max_pages > xen_max_pfn)
+		extra_pages += max_pages - xen_max_pfn;
 
 	/*
 	 * Set identity map on non-RAM pages and prepare remapping the
 	 * underlying RAM.
 	 */
-	xen_set_identity_and_remap(max_pfn, &xen_released_pages,
-				   &remapped_pages);
+	xen_set_identity_and_remap(&xen_released_pages, &remapped_pages);
 
 	extra_pages += xen_released_pages;
 	extra_pages += remapped_pages;
@@ -674,7 +671,7 @@ char * __init xen_memory_setup(void)
 	 * the initial memory is also very large with respect to
 	 * lowmem, but we won't try to deal with that here.
 	 */
-	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+	extra_pages = min(EXTRA_MEM_RATIO * min(xen_max_pfn, PFN_DOWN(MAXMEM)),
 			  extra_pages);
 	i = 0;
 	while (i < xen_e820_map_entries) {
-- 
2.1.4
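As a reading aid for the hunks above, the walk performed by
xen_set_identity_and_remap() can be modeled in miniature: accumulate
non-RAM regions and gaps until a RAM region (or the end of the map) is
reached, then hand the accumulated pfn range to the chunk function.
The sketch below runs that walk on a toy memory map; struct entry and
handle_chunk() are simplified stand-ins, not the kernel's types, and
the handling of the final map entry is modeled on the surrounding
upstream code, which is only partly visible in the diff:

#include <stdio.h>

#define E820_RAM        1
#define PAGE_SHIFT      12
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
#define PFN_UP(x)       (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)

struct entry { unsigned long addr, size; int type; };

/* Stand-in for xen_set_identity_and_remap_chunk(). */
static void handle_chunk(unsigned long start_pfn, unsigned long end_pfn)
{
        printf("identity map + remap pfns 0x%lx..0x%lx\n",
               start_pfn, end_pfn);
}

int main(void)
{
        /* Toy E820: RAM, a reserved hole, RAM again. */
        struct entry map[] = {
                { 0x000000, 0x0a0000, E820_RAM },
                { 0x0a0000, 0x060000, 2 },              /* non-RAM */
                { 0x100000, 0x700000, E820_RAM },
        };
        unsigned int i, n = sizeof(map) / sizeof(map[0]);
        unsigned long start = 0;

        /*
         * Combine non-RAM regions and gaps until a RAM region (or the
         * end of the map) is reached, then handle the range between
         * the previous RAM end and this RAM start.
         */
        for (i = 0; i < n; i++) {
                unsigned long end = map[i].addr + map[i].size;

                if (map[i].type == E820_RAM || i == n - 1) {
                        unsigned long start_pfn = PFN_DOWN(start);
                        unsigned long end_pfn = PFN_UP(map[i].addr);

                        if (start_pfn < end_pfn)
                                handle_chunk(start_pfn, end_pfn);
                        start = end;
                }
        }
        return 0;       /* prints: identity map + remap pfns 0xa0..0x100 */
}

With xen_max_pfn and xen_remap_pfn global, the real counterpart of
handle_chunk() no longer needs the cursor or the limit in its
signature, which is exactly the simplification the diff performs.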