[Xen-changelog] [xen-unstable] xenpaging: handle evict failures



# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1318504870 -3600
# Node ID eda18b27de6ed873ceb0e2f633d04236c5479f6d
# Parent  1515484353c602c41760ff5e110f7490174537f4
xenpaging: handle evict failures

Eviction of a nominated gfn must fail if some other process has mapped the
page without first checking the p2mt of that gfn.
Add a check that cancels the eviction if the page usage count is not 1.
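
To make the check concrete, this is the shape of the test that the
p2m_mem_paging_evict() hunk below adds, with extra commentary (annotated
excerpt of the change, in C):

    /* At this point get_page(page, d) has succeeded, so the only
     * references we expect are the guest's allocation reference plus
     * the one just taken.  Any other mapping raises the count above 2
     * and the eviction is cancelled with -EBUSY. */
    if ( (page->count_info & (PGC_count_mask | PGC_allocated)) !=
         (2 | PGC_allocated) )
        goto out_put;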

Handle the possible eviction failure in the page-in paths.
Between nominate and evict, something may check the p2mt and call
populate. Handle this case and let the gfn enter the page-in path. The
gfn may still be connected to an mfn, so there is no need to allocate a
new page in prep.
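
The prep side of this can be seen in the p2m_mem_paging_prep() hunk
below; roughly (annotated excerpt):

    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);

    /* Allocate only when the gfn really lost its mfn, i.e. it was
     * actually evicted.  A gfn that entered the page-in path between
     * nominate and evict still has its old mfn and keeps it. */
    if ( !mfn_valid(mfn) )
    {
        page = alloc_domheap_page(p2m->domain, 0);
        if ( unlikely(page == NULL) )
            goto out;                          /* -ENOMEM */
        mfn = page_to_mfn(page);
    }

    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_in, a);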

Adjust do_mmu_update to return -ENOENT only if the gfn has entered the
page-in path and is not yet connected to an mfn. Otherwise
linux_privcmd_map_foreign_bulk() may loop forever.
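
In do_mmu_update this becomes the pattern below, repeated for all four
page-table levels (annotated excerpt of the l1e case):

    /* -ENOENT tells the caller to retry the mapping, so return it only
     * while the gfn is in the page-in path and has no backing mfn yet;
     * once an mfn is present the mapping can be completed normally. */
    else if ( p2m_ram_paging_in_start == l1e_p2mt && !mfn_valid(mfn) )
    {
        rc = -ENOENT;
        break;
    }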

Add MEM_EVENT_FLAG_EVICT_FAIL to inform the pager that a page-in request
was sent for a page that may not have been evicted. xenpaging does not
currently need this flag because a failure to evict a gfn will be caught.
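
A pager that does not track evict failures on its own could key off the
new flag when a populate request arrives. A minimal sketch, assuming
hypothetical helpers resume_vcpu_only() and page_in_from_store() (not
part of this patch; xenpaging merely logs the flag, see the tools hunk
below):

    /* Hypothetical pager-side dispatch -- illustration only. */
    if ( req.flags & MEM_EVENT_FLAG_EVICT_FAIL )
        /* The gfn was nominated but never evicted, so the guest page
         * is still resident and nothing needs to be read back. */
        resume_vcpu_only(&req);
    else
        page_in_from_store(&req);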

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---


diff -r 1515484353c6 -r eda18b27de6e tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c       Thu Oct 13 10:09:28 2011 +0200
+++ b/tools/xenpaging/xenpaging.c       Thu Oct 13 12:21:10 2011 +0100
@@ -734,10 +734,12 @@
             }
             else
             {
-                DPRINTF("page already populated (domain = %d; vcpu = %d;"
-                        " gfn = %"PRIx64"; paused = %d)\n",
-                        paging->mem_event.domain_id, req.vcpu_id,
-                        req.gfn, req.flags & MEM_EVENT_FLAG_VCPU_PAUSED);
+                DPRINTF("page %s populated (domain = %d; vcpu = %d;"
+                        " gfn = %"PRIx64"; paused = %d; evict_fail = %d)\n",
+                        req.flags & MEM_EVENT_FLAG_EVICT_FAIL ? "not" : "already",
+                        paging->mem_event.domain_id, req.vcpu_id, req.gfn,
+                        !!(req.flags & MEM_EVENT_FLAG_VCPU_PAUSED) ,
+                        !!(req.flags & MEM_EVENT_FLAG_EVICT_FAIL) );
 
                 /* Tell Xen to resume the vcpu */
                 /* XXX: Maybe just check if the vcpu was paused? */
diff -r 1515484353c6 -r eda18b27de6e xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Oct 13 10:09:28 2011 +0200
+++ b/xen/arch/x86/mm.c Thu Oct 13 12:21:10 2011 +0100
@@ -3532,7 +3532,7 @@
                         rc = -ENOENT;
                         break;
                     }
-                    else if ( p2m_ram_paging_in_start == l1e_p2mt )
+                    else if ( p2m_ram_paging_in_start == l1e_p2mt && !mfn_valid(mfn) )
                     {
                         rc = -ENOENT;
                         break;
@@ -3572,7 +3572,7 @@
                         rc = -ENOENT;
                         break;
                     }
-                    else if ( p2m_ram_paging_in_start == l2e_p2mt )
+                    else if ( p2m_ram_paging_in_start == l2e_p2mt && !mfn_valid(mfn) )
                     {
                         rc = -ENOENT;
                         break;
@@ -3600,7 +3600,7 @@
                         rc = -ENOENT;
                         break;
                     }
-                    else if ( p2m_ram_paging_in_start == l3e_p2mt )
+                    else if ( p2m_ram_paging_in_start == l3e_p2mt && !mfn_valid(mfn) )
                     {
                         rc = -ENOENT;
                         break;
@@ -3628,7 +3628,7 @@
                         rc = -ENOENT;
                         break;
                     }
-                    else if ( p2m_ram_paging_in_start == l4e_p2mt )
+                    else if ( p2m_ram_paging_in_start == l4e_p2mt && !mfn_valid(mfn) )
                     {
                         rc = -ENOENT;
                         break;
diff -r 1515484353c6 -r eda18b27de6e xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Oct 13 10:09:28 2011 +0200
+++ b/xen/arch/x86/mm/p2m.c     Thu Oct 13 12:21:10 2011 +0100
@@ -731,15 +731,24 @@
     if ( unlikely(!mfn_valid(mfn)) )
         goto out;
 
-    if ( (p2mt == p2m_ram_paged) || (p2mt == p2m_ram_paging_in) ||
-         (p2mt == p2m_ram_paging_in_start) )
+    /* Allow only nominated pages */
+    if ( p2mt != p2m_ram_paging_out )
         goto out;
 
+    ret = -EBUSY;
     /* Get the page so it doesn't get modified under Xen's feet */
     page = mfn_to_page(mfn);
     if ( unlikely(!get_page(page, d)) )
         goto out;
 
+    /* Check page count and type once more */
+    if ( (page->count_info & (PGC_count_mask | PGC_allocated)) !=
+         (2 | PGC_allocated) )
+        goto out_put;
+
+    if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_none )
+        goto out_put;
+
     /* Decrement guest domain's ref count of the page */
     if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
         put_page(page);
@@ -751,14 +760,15 @@
     /* Clear content before returning the page to Xen */
     scrub_one_page(page);
 
-    /* Put the page back so it gets freed */
-    put_page(page);
-
     /* Track number of paged gfns */
     atomic_inc(&d->paged_pages);
 
     ret = 0;
 
+ out_put:
+    /* Put the page back so it gets freed */
+    put_page(page);
+
  out:
     p2m_unlock(p2m);
     return ret;
@@ -788,6 +798,7 @@
     mem_event_request_t req;
     p2m_type_t p2mt;
     p2m_access_t a;
+    mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Check that there's space on the ring for this request */
@@ -799,21 +810,26 @@
 
     /* Fix p2m mapping */
     p2m_lock(p2m);
-    p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
-    if ( p2mt == p2m_ram_paged )
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
+    /* Allow only nominated or evicted pages to enter page-in path */
+    if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
     {
-        set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 
-                      p2m_ram_paging_in_start, a);
+        /* Evict will fail now, tag this request for pager */
+        if ( p2mt == p2m_ram_paging_out )
+            req.flags |= MEM_EVENT_FLAG_EVICT_FAIL;
+
+        set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_in_start, a);
         audit_p2m(p2m, 1);
     }
     p2m_unlock(p2m);
 
-    /* Pause domain */
-    if ( v->domain->domain_id == d->domain_id )
+    /* Pause domain if request came from guest and gfn has paging type */
+    if (  p2m_is_paging(p2mt) && v->domain->domain_id == d->domain_id )
     {
         vcpu_pause_nosync(v);
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
     }
+    /* No need to inform pager if the gfn is not in the page-out path */
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
@@ -834,20 +850,26 @@
     struct page_info *page;
     p2m_type_t p2mt;
     p2m_access_t a;
+    mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     int ret = -ENOMEM;
 
     p2m_lock(p2m);
 
-    p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
 
-    /* Get a free page */
-    page = alloc_domheap_page(p2m->domain, 0);
-    if ( unlikely(page == NULL) )
-        goto out;
+    /* Allocate a page if the gfn does not have one yet */
+    if ( !mfn_valid(mfn) )
+    {
+        /* Get a free page */
+        page = alloc_domheap_page(p2m->domain, 0);
+        if ( unlikely(page == NULL) )
+            goto out;
+        mfn = page_to_mfn(page);
+    }
 
     /* Fix p2m mapping */
-    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, a);
+    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_in, a);
     audit_p2m(p2m, 1);
 
     atomic_dec(&d->paged_pages);
diff -r 1515484353c6 -r eda18b27de6e xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h    Thu Oct 13 10:09:28 2011 +0200
+++ b/xen/include/public/mem_event.h    Thu Oct 13 12:21:10 2011 +0100
@@ -38,6 +38,7 @@
 /* Memory event flags */
 #define MEM_EVENT_FLAG_VCPU_PAUSED  (1 << 0)
 #define MEM_EVENT_FLAG_DROP_PAGE    (1 << 1)
+#define MEM_EVENT_FLAG_EVICT_FAIL   (1 << 2)
 
 /* Reasons for the memory event request */
 #define MEM_EVENT_REASON_UNKNOWN     0    /* typical reason */
