[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] alloc_boot_pages() allocates downwards from high memory.



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1172157981 0
# Node ID 3746b3d4f3010d208958e8fce74fe4ae52b0b35f
# Parent  9e35371a3caa3fa3be2af51f48c88c99932d6141
alloc_boot_pages() allocates downwards from high memory.
This conserves low memory.

Provide a new function, alloc_boot_low_pages(), for those callers that
actually require lowmem pages (e.g., pages below 4GB).

Based on a patch by Chris Lalancette <clalance@xxxxxxxxxx>

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/setup.c    |    8 +----
 xen/common/page_alloc.c |   74 +++++++++++++++++++++++++++++++++---------------
 xen/include/xen/mm.h    |    7 +++-
 3 files changed, 59 insertions(+), 30 deletions(-)

diff -r 9e35371a3caa -r 3746b3d4f301 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Thu Feb 22 15:22:16 2007 +0000
+++ b/xen/arch/x86/setup.c      Thu Feb 22 15:26:21 2007 +0000
@@ -508,7 +508,6 @@ void __init __start_xen(multiboot_info_t
         unsigned long kdump_start, kdump_size, k;
 
         /* Mark images pages as free for now. */
-
         init_boot_pages(initial_images_start, initial_images_end);
 
         kdump_start = kexec_crash_area.start;
@@ -526,17 +525,14 @@ void __init __start_xen(multiboot_info_t
         kdump_size >>= PAGE_SHIFT;
 
         /* Allocate pages for Kdump memory area. */
-
-        k = alloc_boot_pages_at(kdump_size, kdump_start);
-        if ( k != kdump_start )
+        if ( !reserve_boot_pages(kdump_start, kdump_size) )
             panic("Unable to reserve Kdump memory\n");
 
         /* Allocate pages for relocated initial images. */
-
         k = ((initial_images_end - initial_images_start) & ~PAGE_MASK) ? 1 : 0;
         k += (initial_images_end - initial_images_start) >> PAGE_SHIFT;
 
-        k = alloc_boot_pages(k, 1);
+        k = alloc_boot_low_pages(k, 1);
         if ( k == 0 )
             panic("Unable to allocate initial images memory\n");
 
diff -r 9e35371a3caa -r 3746b3d4f301 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Thu Feb 22 15:22:16 2007 +0000
+++ b/xen/common/page_alloc.c   Thu Feb 22 15:26:21 2007 +0000
@@ -95,9 +95,10 @@ static unsigned long *alloc_bitmap;
 static unsigned long *alloc_bitmap;
 #define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)
 
-#define allocated_in_map(_pn)                 \
-( !! (alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] & \
-     (1UL<<((_pn)&(PAGES_PER_MAPWORD-1)))) )
+#define allocated_in_map(_pn)                       \
+({  unsigned long ___pn = (_pn);                    \
+    !!(alloc_bitmap[___pn/PAGES_PER_MAPWORD] &      \
+       (1UL<<(___pn&(PAGES_PER_MAPWORD-1)))); })
 
 /*
  * Hint regarding bitwise arithmetic in map_{alloc,free}:
@@ -240,36 +241,65 @@ void init_boot_pages(paddr_t ps, paddr_t
     }
 }
 
-unsigned long alloc_boot_pages_at(unsigned long nr_pfns, unsigned long pfn_at)
+int reserve_boot_pages(unsigned long first_pfn, unsigned long nr_pfns)
 {
     unsigned long i;
 
     for ( i = 0; i < nr_pfns; i++ )
-        if ( allocated_in_map(pfn_at + i) )
+        if ( allocated_in_map(first_pfn + i) )
              break;
 
-    if ( i == nr_pfns )
-    {
-        map_alloc(pfn_at, nr_pfns);
-        return pfn_at;
+    if ( i != nr_pfns )
+        return 0;
+
+    map_alloc(first_pfn, nr_pfns);
+    return 1;
+}
+
+unsigned long alloc_boot_low_pages(
+    unsigned long nr_pfns, unsigned long pfn_align)
+{
+    unsigned long pg, i;
+
+    /* Search forwards to obtain lowest available range. */
+    for ( pg = first_valid_mfn & ~(pfn_align-1);
+          (pg + nr_pfns) < max_page;
+          pg = (pg + i + pfn_align - 1) & ~(pfn_align - 1) )
+    {
+        for ( i = 0; i < nr_pfns; i++ )
+            if ( allocated_in_map(pg+i) )
+                break;
+        if ( i == nr_pfns )
+        {
+            map_alloc(pg, nr_pfns);
+            return pg;
+        }
     }
 
     return 0;
 }
 
-unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align)
-{
-    unsigned long pg;
-
-    pg = first_valid_mfn & ~(pfn_align-1);
-    while ( (pg + nr_pfns) < max_page )
-    {
-        if ( alloc_boot_pages_at(nr_pfns, pg) != 0 )
-            break;
-        pg += pfn_align;
-    }
-
-    return pg;
+unsigned long alloc_boot_pages(
+    unsigned long nr_pfns, unsigned long pfn_align)
+{
+    unsigned long pg, i;
+
+    /* Search backwards to obtain highest available range. */
+    for ( pg = (max_page - nr_pfns) & ~(pfn_align - 1);
+          pg >= first_valid_mfn;
+          pg = (pg + i - nr_pfns) & ~(pfn_align - 1) )
+    {
+        for ( i = 0; i < nr_pfns; i++ )
+            if ( allocated_in_map(pg+i) )
+                break;
+        if ( i == nr_pfns )
+        {
+            map_alloc(pg, nr_pfns);
+            return pg;
+        }
+    }
+
+    return 0;
 }
 
 
diff -r 9e35371a3caa -r 3746b3d4f301 xen/include/xen/mm.h
--- a/xen/include/xen/mm.h      Thu Feb 22 15:22:16 2007 +0000
+++ b/xen/include/xen/mm.h      Thu Feb 22 15:26:21 2007 +0000
@@ -39,8 +39,11 @@ struct page_info;
 /* Boot-time allocator. Turns into generic allocator after bootstrap. */
 paddr_t init_boot_allocator(paddr_t bitmap_start);
 void init_boot_pages(paddr_t ps, paddr_t pe);
-unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align);
-unsigned long alloc_boot_pages_at(unsigned long nr_pfns, unsigned long pfn_at);
+unsigned long alloc_boot_pages(
+    unsigned long nr_pfns, unsigned long pfn_align);
+unsigned long alloc_boot_low_pages(
+    unsigned long nr_pfns, unsigned long pfn_align);
+int reserve_boot_pages(unsigned long first_pfn, unsigned long nr_pfns);
 void end_boot_allocator(void);
 
 /* Generic allocator. These functions are *not* interrupt-safe. */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, which monitors our
servers 24x7x365, backed by RackSpace's Fanatical Support®.