
[Xen-changelog] [xen-unstable] When tmem is enabled, reserve a fraction of memory



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1266256444 0
# Node ID 3a0bd7ca6b1146e2165e245cb0d4c2872771de17
# Parent  cbb147631e8cda24fda36d8ab627e0d9f21c4547
When tmem is enabled, reserve a fraction of memory
for allocations of 0<order<9 to avoid fragmentation
issues.

Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
---
 xen/common/page_alloc.c |   19 ++++++++++++++++++-
 1 files changed, 18 insertions(+), 1 deletion(-)

diff -r cbb147631e8c -r 3a0bd7ca6b11 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Mon Feb 15 17:49:14 2010 +0000
+++ b/xen/common/page_alloc.c   Mon Feb 15 17:54:04 2010 +0000
@@ -224,6 +224,10 @@ static unsigned long *avail[MAX_NUMNODES
 static unsigned long *avail[MAX_NUMNODES];
 static long total_avail_pages;
 
+/* TMEM: Reserve a fraction of memory for mid-size (0<order<9) allocations.*/
+static long midsize_alloc_zone_pages;
+#define MIDSIZE_ALLOC_FRAC 128
+
 static DEFINE_SPINLOCK(heap_lock);
 
 static unsigned long init_node_heap(int node, unsigned long mfn,
@@ -304,6 +308,14 @@ static struct page_info *alloc_heap_page
     spin_lock(&heap_lock);
 
     /*
+     * TMEM: When available memory is scarce, allow only mid-size allocations
+     * to avoid worst of fragmentation issues.
+     */
+    if ( opt_tmem && ((order == 0) || (order >= 9)) &&
+         (total_avail_pages <= midsize_alloc_zone_pages) )
+        goto fail;
+
+    /*
      * Start with requested node, but exhaust all node memory in requested 
      * zone before failing, only calc new node value if we fail to find memory 
      * in target node, this avoids needless computation on fast-path.
@@ -336,6 +348,7 @@ static struct page_info *alloc_heap_page
         return pg;
     }
 
+ fail:
     /* No suitable memory blocks. Fail the request. */
     spin_unlock(&heap_lock);
     return NULL;
@@ -504,6 +517,10 @@ static void free_heap_pages(
     avail[node][zone] += 1 << order;
     total_avail_pages += 1 << order;
 
+    if ( opt_tmem )
+        midsize_alloc_zone_pages = max(
+            midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC);
+
     /* Merge chunks as far as possible. */
     while ( order < MAX_ORDER )
     {
@@ -842,7 +859,7 @@ static unsigned long avail_heap_pages(
 
 unsigned long total_free_pages(void)
 {
-    return total_avail_pages;
+    return total_avail_pages - midsize_alloc_zone_pages;
 }
 
 void __init end_boot_allocator(void)

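For readers who want to see the policy in isolation, here is a minimal,
self-contained C sketch of the mechanism this patch adds. This is not
Xen code: the helper names note_free() and may_allocate(), and the 1 GiB
figure, are invented for illustration. It models the ratcheting
watermark kept in midsize_alloc_zone_pages, the gate added at the top of
alloc_heap_pages(), and the adjusted total_free_pages().

#include <stdio.h>
#include <stdbool.h>

#define MIDSIZE_ALLOC_FRAC 128

static bool opt_tmem = true;
static long total_avail_pages;
static long midsize_alloc_zone_pages;

/* Models the hook in free_heap_pages(): on every free, ratchet the
 * reservation watermark up to 1/128th of the free-page total; it
 * never moves back down. */
static void note_free(unsigned int order)
{
    total_avail_pages += 1L << order;
    if (opt_tmem &&
        total_avail_pages / MIDSIZE_ALLOC_FRAC > midsize_alloc_zone_pages)
        midsize_alloc_zone_pages = total_avail_pages / MIDSIZE_ALLOC_FRAC;
}

/* Models the gate added to alloc_heap_pages(): once free memory has
 * fallen to the watermark, only mid-size (0 < order < 9) requests may
 * proceed; order-0 and order >= 9 requests are refused so contiguous
 * runs survive for the mid-size allocations tmem depends on. */
static bool may_allocate(unsigned int order)
{
    if (opt_tmem && (order == 0 || order >= 9) &&
        total_avail_pages <= midsize_alloc_zone_pages)
        return false;
    return true;
}

/* Models the patched total_free_pages(): the reserved slice is hidden
 * from callers asking how much memory is free. */
static long total_free_pages(void)
{
    return total_avail_pages - midsize_alloc_zone_pages;
}

int main(void)
{
    /* Hypothetical history: free 512 order-9 chunks, i.e. 262144
     * 4 KiB pages (1 GiB).  The watermark settles at 2048 pages. */
    for (int i = 0; i < 512; i++)
        note_free(9);
    printf("avail=%ld reserve=%ld reported-free=%ld\n",
           total_avail_pages, midsize_alloc_zone_pages,
           total_free_pages());

    /* Drain the heap down to the watermark and probe the gate. */
    total_avail_pages = midsize_alloc_zone_pages;
    printf("order 0 allowed: %d\n", may_allocate(0));   /* 0: refused */
    printf("order 4 allowed: %d\n", may_allocate(4));   /* 1: allowed */
    printf("order 9 allowed: %d\n", may_allocate(9));   /* 0: refused */
    return 0;
}

Two details worth noting in the real patch: the max() in
free_heap_pages() means the watermark only ever grows, so the reserve
is sized against the largest free-memory total the heap has seen; and
total_free_pages() now reports free memory net of the reserve, so
callers that size themselves from it (such as tmem) see a smaller heap
and naturally leave the reserved slice alone.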
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
