
[Xen-changelog] Fix handling of memory holes for Xen heap and domain 0



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 2f75dac09365959d87709d15a181201abf189cb8
# Parent  9d0120a5aa452049ae78488fb990c31a8b973fe8
Fix handling of memory holes for Xen heap and domain 0
kernel image and ramdisk.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
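
A note on what the patch does: the old code placed the relocated DOM0 modules
immediately after the Xen heap and panicked if they ran past the end of the
first RAM region above 1MB; the boot allocator was then only given RAM situated
after the modules, and the Xen heap was assumed to be hole-free. The new code
searches the E820 map for a RAM extent large enough to stash the modules above
xenheap_phys_end, and clips every RAM extent against the Xen heap and the
module stash before handing it to the boot allocator (and likewise clips
against RAM holes when seeding the Xen heap). Below is a minimal, standalone
sketch of that boot-allocator clipping; the type and function names are made
up for illustration and are not the ones Xen uses:

/*
 * Clip one E820 RAM extent [start, end) so that it skips the Xen heap
 * and the range where the DOM0 modules were stashed, mirroring the new
 * init_boot_pages() loop in setup.c.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t physaddr_t;

struct ram_extent { physaddr_t start, end; };   /* half-open: [start, end) */

static struct ram_extent clip_extent(struct ram_extent ext,
                                     physaddr_t heap_end,
                                     physaddr_t mods_start,
                                     physaddr_t mods_end)
{
    if ( ext.start < heap_end )
        ext.start = heap_end;                 /* skip the Xen heap       */
    if ( (ext.start < mods_end) && (ext.end > mods_start) )
        ext.start = mods_end;                 /* skip the DOM0 modules   */
    return ext;                               /* empty if start >= end   */
}

int main(void)
{
    /* Example map: 0-640kB and 1MB-256MB RAM; the Xen heap ends at 12MB
       and the DOM0 modules are stashed at 12MB-16MB. */
    struct ram_extent map[] = { { 0, 0xA0000 }, { 0x100000, 0x10000000 } };
    physaddr_t heap_end = 0xC00000, mods_s = 0xC00000, mods_e = 0x1000000;

    for ( unsigned int i = 0; i < sizeof(map)/sizeof(map[0]); i++ )
    {
        struct ram_extent c = clip_extent(map[i], heap_end, mods_s, mods_e);
        if ( c.start < c.end )       /* empty extents are simply dropped */
            printf("boot pages: %#llx-%#llx\n",
                   (unsigned long long)c.start, (unsigned long long)c.end);
    }
    return 0;
}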

diff -r 9d0120a5aa45 -r 2f75dac09365 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Thu Aug 11 16:48:36 2005
+++ b/xen/arch/x86/setup.c      Thu Aug 11 18:03:22 2005
@@ -248,10 +248,11 @@
 {
     char *cmdline;
     module_t *mod = (module_t *)__va(mbi->mods_addr);
-    unsigned long firsthole_start, nr_pages;
+    unsigned long nr_pages, modules_length;
     unsigned long initial_images_start, initial_images_end;
     unsigned long _initrd_start = 0, _initrd_len = 0;
     unsigned int initrdidx = 1;
+    physaddr_t s, e;
     struct e820entry e820_raw[E820MAX];
     int i, e820_raw_nr = 0, bytes = 0;
     struct ns16550_defaults ns16550 = {
@@ -330,22 +331,31 @@
 
     max_page = init_e820(e820_raw, &e820_raw_nr);
 
-    /* Find the first high-memory RAM hole. */
-    for ( i = 0; i < e820.nr_map; i++ )
+    modules_length = mod[mbi->mods_count-1].mod_end - mod[0].mod_start;
+
+    /* Find a large enough RAM extent to stash the DOM0 modules. */
+    for ( i = 0; ; i++ )
+    {
         if ( (e820.map[i].type == E820_RAM) &&
-             (e820.map[i].addr >= 0x100000) )
+             (e820.map[i].size >= modules_length) &&
+             ((e820.map[i].addr + e820.map[i].size) >=
+              (xenheap_phys_end + modules_length)) )
+        {
+            /* Stash as near as possible to the beginning of the RAM extent. */
+            initial_images_start = e820.map[i].addr;
+            if ( initial_images_start < xenheap_phys_end )
+                initial_images_start = xenheap_phys_end;
+            initial_images_end = initial_images_start + modules_length;
             break;
-    firsthole_start = e820.map[i].addr + e820.map[i].size;
-
-    /* Relocate the Multiboot modules. */
-    initial_images_start = xenheap_phys_end;
-    initial_images_end   = initial_images_start + 
-        (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
-    if ( initial_images_end > firsthole_start )
-    {
-        printk("Not enough memory to stash the DOM0 kernel image.\n");
-        for ( ; ; ) ;
-    }
+        }
+
+        if ( i == e820.nr_map )
+        {
+            printk("Not enough memory to stash the DOM0 kernel image.\n");
+            for ( ; ; ) ;
+        }
+    }
+
 #if defined(CONFIG_X86_32)
     memmove((void *)initial_images_start,  /* use low mapping */
             (void *)mod[0].mod_start,      /* use low mapping */
@@ -358,16 +368,23 @@
 
     /* Initialise boot-time allocator with all RAM situated after modules. */
     xenheap_phys_start = init_boot_allocator(__pa(&_end));
-    nr_pages   = 0;
+    nr_pages = 0;
     for ( i = 0; i < e820.nr_map; i++ )
     {
         if ( e820.map[i].type != E820_RAM )
             continue;
+
         nr_pages += e820.map[i].size >> PAGE_SHIFT;
-        if ( (e820.map[i].addr + e820.map[i].size) >= initial_images_end )
-            init_boot_pages((e820.map[i].addr < initial_images_end) ?
-                            initial_images_end : e820.map[i].addr,
-                            e820.map[i].addr + e820.map[i].size);
+
+        /* Initialise boot heap, skipping Xen heap and dom0 modules. */
+        s = e820.map[i].addr;
+        e = s + e820.map[i].size;
+        if ( s < xenheap_phys_end )
+            s = xenheap_phys_end;
+        if ( (s < initial_images_end) && (e > initial_images_start) )
+            s = initial_images_end;
+        init_boot_pages(s, e);
+
 #if defined (CONFIG_X86_64)
         /*
          * x86/64 maps all registered RAM. Points to note:
@@ -404,10 +421,30 @@
 
     end_boot_allocator();
 
-    init_xenheap_pages(xenheap_phys_start, xenheap_phys_end);
-    printk("Xen heap: %luMB (%lukB)\n",
-           (xenheap_phys_end-xenheap_phys_start) >> 20,
-           (xenheap_phys_end-xenheap_phys_start) >> 10);
+    /* Initialise the Xen heap, skipping RAM holes. */
+    nr_pages = 0;
+    for ( i = 0; i < e820.nr_map; i++ )
+    {
+        if ( e820.map[i].type != E820_RAM )
+            continue;
+
+        s = e820.map[i].addr;
+        e = s + e820.map[i].size;
+        if ( s < xenheap_phys_start )
+            s = xenheap_phys_start;
+        if ( e > xenheap_phys_end )
+            e = xenheap_phys_end;
+ 
+        if ( s < e )
+        {
+            nr_pages += (e - s) >> PAGE_SHIFT;
+            init_xenheap_pages(s, e);
+        }
+    }
+
+    printk("Xen heap: %luMB (%lukB)\n", 
+           nr_pages >> (20 - PAGE_SHIFT),
+           nr_pages << (PAGE_SHIFT - 10));
 
     early_boot = 0;
 
diff -r 9d0120a5aa45 -r 2f75dac09365 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Thu Aug 11 16:48:36 2005
+++ b/xen/common/page_alloc.c   Thu Aug 11 18:03:22 2005
@@ -418,6 +418,8 @@
 
     ps = round_pgup(ps);
     pe = round_pgdown(pe);
+    if ( pe <= ps )
+        return;
 
     memguard_guard_range(phys_to_virt(ps), pe - ps);
 
@@ -487,19 +489,25 @@
 
     ps = round_pgup(ps) >> PAGE_SHIFT;
     pe = round_pgdown(pe) >> PAGE_SHIFT;
-
-    if (ps < MAX_DMADOM_PFN && pe > MAX_DMADOM_PFN) {
-        init_heap_pages(MEMZONE_DMADOM, pfn_to_page(ps), MAX_DMADOM_PFN - ps);
-        init_heap_pages(MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN),
-                        pe - MAX_DMADOM_PFN);
+    if ( pe <= ps )
+        return;
+
+    if ( (ps < MAX_DMADOM_PFN) && (pe > MAX_DMADOM_PFN) )
+    {
+        init_heap_pages(
+            MEMZONE_DMADOM, pfn_to_page(ps), MAX_DMADOM_PFN - ps);
+        init_heap_pages(
+            MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN), pe - MAX_DMADOM_PFN);
     }
     else
+    {
         init_heap_pages(pfn_dom_zone_type(ps), pfn_to_page(ps), pe - ps);
-}
-
-
-struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order,
-                                     unsigned int flags)
+    }
+}
+
+
+struct pfn_info *alloc_domheap_pages(
+    struct domain *d, unsigned int order, unsigned int flags)
 {
     struct pfn_info *pg;
     cpumask_t mask;
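
The page_alloc.c hunks above add empty-range guards, since callers can now
pass clipped extents where pe <= ps, and reformat the existing split of a page
range across the DMA boundary. A small standalone sketch of that split logic
(MAX_DMADOM_PFN is given an assumed example value here, and the helper names
are stand-ins, not Xen's):

#include <stdio.h>

/* Assumed example boundary: pages below this pfn go to the DMA zone. */
#define MAX_DMADOM_PFN 0x7FFFFUL

enum zone { MEMZONE_DMADOM, MEMZONE_DOM };

/* Stand-in for init_heap_pages(): just report what would be added. */
static void add_to_zone(enum zone z, unsigned long pfn, unsigned long nr)
{
    printf("zone %d: %lu pages starting at pfn %#lx\n", (int)z, nr, pfn);
}

static void init_domheap_range(unsigned long ps, unsigned long pe)
{
    if ( pe <= ps )
        return;                        /* clipped extents may be empty */

    if ( (ps < MAX_DMADOM_PFN) && (pe > MAX_DMADOM_PFN) )
    {
        /* Range straddles the DMA boundary: split it across both zones. */
        add_to_zone(MEMZONE_DMADOM, ps, MAX_DMADOM_PFN - ps);
        add_to_zone(MEMZONE_DOM, MAX_DMADOM_PFN, pe - MAX_DMADOM_PFN);
    }
    else
    {
        /* Entirely on one side of the boundary. */
        add_to_zone((ps < MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM,
                    ps, pe - ps);
    }
}

int main(void)
{
    init_domheap_range(0x1000, 0x90000);   /* straddles the boundary */
    init_domheap_range(0x2000, 0x2000);    /* empty range: ignored   */
    return 0;
}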
