[Xen-changelog] [linux-2.6.18-xen] xen/x86: fix and improve xen_limit_pages_to_max_mfn()
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1202723398 0
# Node ID 4d4b9fc9205bb12475074d7140567fe69d2c2d5d
# Parent  5c61cd349b208a19fd7e9c8d07963166690a3de4
xen/x86: fix and improve xen_limit_pages_to_max_mfn()

- don't do multicall when nr_mcl is zero (and specifically don't access
  cr_mcl[nr_mcl - 1] in that case)
- fix CONFIG_XEN_COMPAT <= 0x030002 handling
- don't exchange pages already meeting the restriction (likely avoiding
  exchanging anything at all)
- avoid calling kmap functions without CONFIG_XEN_SCRUB_PAGES
- eliminate a few local variables

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 arch/i386/mm/hypervisor.c |   90 +++++++++++++++++++++++++++-------------------
 1 files changed, 53 insertions(+), 37 deletions(-)

diff -r 5c61cd349b20 -r 4d4b9fc9205b arch/i386/mm/hypervisor.c
--- a/arch/i386/mm/hypervisor.c	Thu Feb 07 10:33:19 2008 +0000
+++ b/arch/i386/mm/hypervisor.c	Mon Feb 11 09:49:58 2008 +0000
@@ -434,19 +434,17 @@ int xen_limit_pages_to_max_mfn(
 {
 	unsigned long flags, frame;
 	unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
-	void *v;
 	struct page *page;
-	unsigned int i, nr_mcl;
+	unsigned int i, n, nr_mcl;
 	int rc, success;
+	DECLARE_BITMAP(limit_map, 1 << MAX_CONTIG_ORDER);
 
 	struct xen_memory_exchange exchange = {
 		.in = {
-			.nr_extents   = 1UL << order,
 			.extent_order = 0,
 			.domid        = DOMID_SELF
 		},
 		.out = {
-			.nr_extents   = 1UL << order,
 			.extent_order = 0,
 			.address_bits = address_bits,
 			.domid        = DOMID_SELF
@@ -459,80 +457,98 @@ int xen_limit_pages_to_max_mfn(
 	if (unlikely(order > MAX_CONTIG_ORDER))
 		return -ENOMEM;
 
+	bitmap_zero(limit_map, 1U << order);
 	set_xen_guest_handle(exchange.in.extent_start, in_frames);
 	set_xen_guest_handle(exchange.out.extent_start, out_frames);
 
 	/* 0. Scrub the pages. */
-	for ( i = 0 ; i < 1UL<<order ; i++ ) {
+	for (i = 0, n = 0; i < 1U<<order ; i++) {
 		page = &pages[i];
-
-		if (!PageHighMem(page)) {
-			v = page_address(page);
-			scrub_pages(v, 1);
-		} else {
-			v = kmap(page);
-			scrub_pages(v, 1);
+		if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
+			continue;
+		__set_bit(i, limit_map);
+
+		if (!PageHighMem(page))
+			scrub_pages(page_address(page), 1);
+#ifdef CONFIG_XEN_SCRUB_PAGES
+		else {
+			scrub_pages(kmap(page), 1);
 			kunmap(page);
+			++n;
 		}
-	}
-
-	kmap_flush_unused();
+#endif
+	}
+	if (bitmap_empty(limit_map, 1U << order))
+		return 0;
+
+	if (n)
+		kmap_flush_unused();
 
 	balloon_lock(flags);
 
 	/* 1. Zap current PTEs (if any), remembering MFNs. */
-	for (i = 0, nr_mcl = 0; i < (1U<<order); i++) {
+	for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
+		if(!test_bit(i, limit_map))
+			continue;
 		page = &pages[i];
-		out_frames[i] = page_to_pfn(page);
-		in_frames[i] = pfn_to_mfn(out_frames[i]);
+		out_frames[n] = page_to_pfn(page);
+		in_frames[n] = pfn_to_mfn(out_frames[n]);
 
 		if (!PageHighMem(page))
 			MULTI_update_va_mapping(cr_mcl + nr_mcl++,
 						(unsigned long)page_address(page),
 						__pte_ma(0), 0);
 
-		set_phys_to_machine(out_frames[i], INVALID_P2M_ENTRY);
-	}
-	if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
+		set_phys_to_machine(out_frames[n], INVALID_P2M_ENTRY);
+		++n;
+	}
+	if (nr_mcl && HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
 		BUG();
 
 	/* 2. Get new memory below the required limit. */
+	exchange.in.nr_extents = n;
+	exchange.out.nr_extents = n;
 	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
-	success = (exchange.nr_exchanged == (1UL << order));
+	success = (exchange.nr_exchanged == n);
 	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
 	BUG_ON(success && (rc != 0));
 #if CONFIG_XEN_COMPAT <= 0x030002
 	if (unlikely(rc == -ENOSYS)) {
 		/* Compatibility when XENMEM_exchange is unsupported. */
 		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-					 &exchange.in) != (1UL << order))
+					 &exchange.in) != n)
 			BUG();
-		success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-						&exchange.out) != (1UL <<order));
+		if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
+					 &exchange.out) != n)
+			BUG();
+		success = 1;
 	}
 #endif
 
 	/* 3. Map the new pages in place of old pages. */
-	for (i = 0, nr_mcl = 0; i < (1U<<order); i++) {
-		unsigned long pfn;
+	for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
+		if(!test_bit(i, limit_map))
+			continue;
 		page = &pages[i];
-		pfn = page_to_pfn(page);
-
-		frame = success ? out_frames[i] : in_frames[i];
+
+		frame = success ? out_frames[n] : in_frames[n];
 
 		if (!PageHighMem(page))
 			MULTI_update_va_mapping(cr_mcl + nr_mcl++,
 						(unsigned long)page_address(page),
 						pfn_pte_ma(frame, PAGE_KERNEL), 0);
 
-		set_phys_to_machine(pfn, frame);
-	}
-	cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
-						   ? UVMF_TLB_FLUSH|UVMF_ALL
-						   : UVMF_INVLPG|UVMF_ALL;
-	if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
-		BUG();
+		set_phys_to_machine(page_to_pfn(page), frame);
+		++n;
+	}
+	if (nr_mcl) {
+		cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
+						   ? UVMF_TLB_FLUSH|UVMF_ALL
+						   : UVMF_INVLPG|UVMF_ALL;
+		if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
+			BUG();
+	}
 
 	balloon_unlock(flags);
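For readers unfamiliar with the interface being fixed:
xen_limit_pages_to_max_mfn() takes a block of 2^order pages that is
already allocated and, via XENMEM_exchange, replaces any backing
machine frames lying at or above the 2^address_bits boundary. A
minimal caller sketch follows, assuming only what is visible in the
patch above; the helper name alloc_pages_below() is hypothetical, and
the function's declaration is assumed to come from this tree's
<asm/hypervisor.h>.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/hypervisor.h>	/* assumed home of the declaration */

/*
 * Hypothetical helper (not part of the patch): allocate 2^order
 * pages and ask Xen to ensure every backing machine frame is
 * addressable with address_bits bits, e.g. for a device limited
 * to 32-bit DMA addresses.
 */
static struct page *alloc_pages_below(unsigned int order,
				      unsigned int address_bits)
{
	struct page *pages = alloc_pages(GFP_KERNEL, order);

	if (!pages)
		return NULL;

	/*
	 * Treat any non-zero return as failure; the paths visible in
	 * the patch return -ENOMEM when order exceeds MAX_CONTIG_ORDER
	 * and 0 when no page needed exchanging.
	 */
	if (xen_limit_pages_to_max_mfn(pages, order, address_bits)) {
		__free_pages(pages, order);
		return NULL;
	}
	return pages;
}

/* Usage: an order-2 block (four pages) for a 32-bit-capable device: */
/* struct page *buf = alloc_pages_below(2, 32); */

Note how the new early exit benefits such a caller: when the allocator
already handed back sufficiently low memory, the rewritten function
finds limit_map empty and returns before issuing any hypercall at all.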
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog