
[Xen-changelog] [xen-unstable] [LINUX] Kexec: Ensure that pages allocated for kexec have MFNs which are within the required limits



# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxxxxx>
# Date 1173890372 0
# Node ID a1daade929520793f46feaaae9fd564271324eeb
# Parent  fa3d25355aa27eb814526bf965a9267104525e73
[LINUX] Kexec: Ensure that pages allocated for kexec have MFNs which are within
the required limits.

Previously we were lucky, since most dom0 pages had MFNs low enough anyway,
but Xen is now more aggressive about handing out higher MFNs when it can.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxxxxx>
---
 linux-2.6-xen-sparse/kernel/kexec.c |   23 ++++++++++++++++++++---
 1 files changed, 20 insertions(+), 3 deletions(-)

diff -r fa3d25355aa2 -r a1daade92952 linux-2.6-xen-sparse/kernel/kexec.c
--- a/linux-2.6-xen-sparse/kernel/kexec.c       Wed Mar 14 16:36:04 2007 +0000
+++ b/linux-2.6-xen-sparse/kernel/kexec.c       Wed Mar 14 16:39:32 2007 +0000
@@ -330,13 +330,27 @@ static int kimage_is_destination_range(s
        return 0;
 }
 
-static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
+static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit)
 {
        struct page *pages;
 
        pages = alloc_pages(gfp_mask, order);
        if (pages) {
                unsigned int count, i;
+#ifdef CONFIG_XEN
+               int address_bits;
+
+               if (limit == ~0UL)
+                       address_bits = BITS_PER_LONG;
+               else
+                       address_bits = long_log2(limit);
+
+               if (xen_create_contiguous_region((unsigned long)page_address(pages),
+                                                order, address_bits) < 0) {
+                       __free_pages(pages, order);
+                       return NULL;
+               }
+#endif
                pages->mapping = NULL;
                set_page_private(pages, order);
                count = 1 << order;
@@ -355,6 +369,9 @@ static void kimage_free_pages(struct pag
        count = 1 << order;
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
+#ifdef CONFIG_XEN
+       xen_destroy_contiguous_region((unsigned long)page_address(page), order);
+#endif
        __free_pages(page, order);
 }
 
@@ -400,7 +417,7 @@ static struct page *kimage_alloc_normal_
        do {
                unsigned long pfn, epfn, addr, eaddr;
 
-               pages = kimage_alloc_pages(GFP_KERNEL, order);
+               pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT);
                if (!pages)
                        break;
                pfn   = kexec_page_to_pfn(pages);
@@ -709,7 +726,7 @@ static struct page *kimage_alloc_page(st
                kimage_entry_t *old;
 
                /* Allocate a page, if we run out of memory give up */
-               page = kimage_alloc_pages(gfp_mask, 0);
+               page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT);
                if (!page)
                        return NULL;
                /* If the page cannot be used file it away */
