
[Xen-devel] [PATCH] xenpaging: break endless loop during initial page-out with large pagefiles



To allow xenpaging to be started right after 'xm start XYZ', I specified
a pagefile size equal to the guest memory size, in the hope of catching
more errors where the paged-out state of a p2mt is not checked.

While doing that, xenpaging got into an endless loop because some pages
can't be paged out right away. With this change the policy scans gfns
sequentially instead of picking them at random, and it returns -ENOSPC
once the gfn number wraps around without finding an evictable page.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

---
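For reference, below is a minimal, self-contained sketch of the new
selection logic (my own illustration, not code from the tree): gfns are
scanned round-robin, gfns that are already paged out or already handed
out but not yet confirmed are skipped, and -ENOSPC is returned once the
scan wraps back to its starting point. The bit helpers and the
choose_victim() wrapper are simplified stand-ins for the bitmap macros
and policy_choose_victim() used by xenpaging.

/* Standalone sketch of the round-robin victim selection with wrap
 * detection.  Not the xenpaging sources; test_bit()/set_bit() are plain
 * C stand-ins for the bitmap helpers the real code uses. */
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long *bitmap;      /* gfns already paged out */
static unsigned long *unconsumed;  /* gfns chosen but not yet paged out */
static unsigned long current_gfn;
static unsigned long max_pages;

static int test_bit(unsigned long nr, const unsigned long *map)
{
    return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static void set_bit(unsigned long nr, unsigned long *map)
{
    map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int choose_victim(unsigned long *gfn)
{
    unsigned long wrap = current_gfn;

    do
    {
        current_gfn++;
        if ( current_gfn >= max_pages )
            current_gfn = 0;
        if ( wrap == current_gfn )
            return -ENOSPC;        /* scanned every gfn once: nothing left */
    }
    while ( test_bit(current_gfn, bitmap) ||
            test_bit(current_gfn, unconsumed) );

    set_bit(current_gfn, unconsumed);
    *gfn = current_gfn;
    return 0;
}

int main(void)
{
    unsigned long gfn, words;

    max_pages = 8;
    words = (max_pages + BITS_PER_LONG - 1) / BITS_PER_LONG;
    bitmap = calloc(words, sizeof(unsigned long));
    unconsumed = calloc(words, sizeof(unsigned long));
    if ( !bitmap || !unconsumed )
        return 1;

    set_bit(0, bitmap);            /* never page out gfn 0 */

    /* Keep choosing victims until the policy reports -ENOSPC. */
    while ( choose_victim(&gfn) == 0 )
        printf("victim gfn %lu\n", gfn);

    return 0;
}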
 tools/xenpaging/policy_default.c |   30 ++++++++++++++++++++++++------
 tools/xenpaging/xenpaging.c      |    7 +++++--
 2 files changed, 29 insertions(+), 8 deletions(-)

--- xen-unstable.hg-4.1.22155.orig/tools/xenpaging/policy_default.c
+++ xen-unstable.hg-4.1.22155/tools/xenpaging/policy_default.c
@@ -32,6 +32,10 @@
 static unsigned long mru[MRU_SIZE];
 static unsigned int i_mru = 0;
 static unsigned long *bitmap;
+static unsigned long *unconsumed;
+static unsigned long current_gfn;
+static unsigned long bitmap_size;
+static unsigned long max_pages;
 
 
 int policy_init(xenpaging_t *paging)
@@ -43,6 +47,14 @@ int policy_init(xenpaging_t *paging)
     rc = alloc_bitmap(&bitmap, paging->bitmap_size);
     if ( rc != 0 )
         goto out;
+    /* Allocate bitmap to track unusable pages */
+    rc = alloc_bitmap(&unconsumed, paging->bitmap_size);
+    if ( rc != 0 )
+        goto out;
+
+    /* record bitmap_size */
+    bitmap_size = paging->bitmap_size;
+    max_pages = paging->domain_info->max_pages;
 
     /* Initialise MRU list of paged in pages */
     for ( i = 0; i < MRU_SIZE; i++ )
@@ -51,8 +63,6 @@ int policy_init(xenpaging_t *paging)
     /* Don't page out page 0 */
     set_bit(0, bitmap);
 
-    rc = 0;
-
  out:
     return rc;
 }
@@ -61,17 +71,24 @@ int policy_choose_victim(xc_interface *x
                          xenpaging_t *paging, domid_t domain_id,
                          xenpaging_victim_t *victim)
 {
+    unsigned long wrap = current_gfn;
     ASSERT(victim != NULL);
 
     /* Domain to pick on */
     victim->domain_id = domain_id;
-    
+
     do
     {
-        /* Randomly choose a gfn to evict */
-        victim->gfn = rand() % paging->domain_info->max_pages;
+        current_gfn++;
+        if ( current_gfn >= max_pages )
+            current_gfn = 0;
+        if ( wrap == current_gfn )
+            return -ENOSPC;
     }
-    while ( test_bit(victim->gfn, bitmap) );
+    while ( test_bit(current_gfn, bitmap) || test_bit(current_gfn, unconsumed) );
+
+    set_bit(current_gfn, unconsumed);
+    victim->gfn = current_gfn;
 
     return 0;
 }
@@ -79,6 +96,7 @@ int policy_choose_victim(xc_interface *x
 void policy_notify_paged_out(domid_t domain_id, unsigned long gfn)
 {
     set_bit(gfn, bitmap);
+    clear_bit(gfn, unconsumed);
 }
 
 void policy_notify_paged_in(domid_t domain_id, unsigned long gfn)
--- xen-unstable.hg-4.1.22155.orig/tools/xenpaging/xenpaging.c
+++ xen-unstable.hg-4.1.22155/tools/xenpaging/xenpaging.c
@@ -446,7 +446,8 @@ static int evict_victim(xc_interface *xc
         ret = policy_choose_victim(xch, paging, domain_id, victim);
         if ( ret != 0 )
         {
-            ERROR("Error choosing victim");
+            if ( ret != -ENOSPC )
+                ERROR("Error choosing victim");
             goto out;
         }
 
@@ -525,7 +526,9 @@ int main(int argc, char *argv[])
     memset(victims, 0, sizeof(xenpaging_victim_t) * num_pages);
     for ( i = 0; i < num_pages; i++ )
     {
-        evict_victim(xch, paging, domain_id, &victims[i], fd, i);
+        rc = evict_victim(xch, paging, domain_id, &victims[i], fd, i);
+        if ( rc == -ENOSPC )
+            break;
         if ( i % 100 == 0 )
             DPRINTF("%d pages evicted\n", i);
     }
