
[Xen-devel] [PATCH 08/12] xenpaging: drop paged pages in guest_remove_page


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Olaf Hering <olaf@xxxxxxxxx>
  • Date: Mon, 10 Jan 2011 17:43:53 +0100
  • Delivery-date: Mon, 10 Jan 2011 08:55:59 -0800
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

Simply drop paged-out pages in guest_remove_page(), and notify xenpaging
so it can drop its reference to the gfn. If the ring is full, the page
will remain in the paged-out state in xenpaging. This is not a problem;
it only means this gfn will not be nominated again.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

---
v3:
 send one-way notification to pager to release page
 use new mem_event_check_ring() feature to not pause vcpu when ring is full (see the sketch after this changelog)
v2:
 resume dropped page to unpause vcpus
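The sketch referenced above, a simplified excerpt of the new
p2m_mem_paging_drop_page() (surrounding declarations omitted):

    /* Best-effort, one-way notification: only queue the event if the
     * ring has room, so the vcpu is never paused on a full ring.  If
     * the event is skipped, the pager keeps the gfn in paged-out
     * state, which is harmless; the gfn simply cannot be nominated
     * again. */
    if ( mem_event_check_ring(d) == 0 )
    {
        memset(&req, 0, sizeof(req));
        req.flags |= MEM_EVENT_FLAG_DROP_PAGE;
        req.gfn = gfn;
        req.vcpu_id = v->vcpu_id;
        mem_event_put_request(d, &req);
    }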

 tools/xenpaging/xenpaging.c    |   39 ++++++++++++++++++++------------
 xen/arch/x86/mm/p2m.c          |   49 ++++++++++++++++++++++++++++++-----------
 xen/common/memory.c            |    6 +++++
 xen/include/asm-x86/p2m.h      |    4 +++
 xen/include/public/mem_event.h |    1 +
 5 files changed, 72 insertions(+), 27 deletions(-)

--- xen-unstable.hg-4.1.22694.orig/tools/xenpaging/xenpaging.c
+++ xen-unstable.hg-4.1.22694/tools/xenpaging/xenpaging.c
@@ -638,25 +638,34 @@ int main(int argc, char *argv[])
                     goto out;
                 }
                 
-                /* Populate the page */
-                rc = xenpaging_populate_page(paging, &req.gfn, fd, i);
-                if ( rc != 0 )
+                if ( req.flags & MEM_EVENT_FLAG_DROP_PAGE )
                 {
-                    ERROR("Error populating page");
-                    goto out;
+                    DPRINTF("drop_page ^ gfn %"PRIx64" pageslot %d\n", req.gfn, i);
+                    /* Notify policy of page being dropped */
+                    policy_notify_paged_in(req.gfn);
                 }
+                else
+                {
+                    /* Populate the page */
+                    rc = xenpaging_populate_page(paging, &req.gfn, fd, i);
+                    if ( rc != 0 )
+                    {
+                        ERROR("Error populating page");
+                        goto out;
+                    }
 
-                /* Prepare the response */
-                rsp.gfn = req.gfn;
-                rsp.p2mt = req.p2mt;
-                rsp.vcpu_id = req.vcpu_id;
-                rsp.flags = req.flags;
+                    /* Prepare the response */
+                    rsp.gfn = req.gfn;
+                    rsp.p2mt = req.p2mt;
+                    rsp.vcpu_id = req.vcpu_id;
+                    rsp.flags = req.flags;
 
-                rc = xenpaging_resume_page(paging, &rsp, 1);
-                if ( rc != 0 )
-                {
-                    ERROR("Error resuming page");
-                    goto out;
+                    rc = xenpaging_resume_page(paging, &rsp, 1);
+                    if ( rc != 0 )
+                    {
+                        ERROR("Error resuming page");
+                        goto out;
+                    }
                 }
 
                 /* Evict a new page to replace the one we just paged in */
--- xen-unstable.hg-4.1.22694.orig/xen/arch/x86/mm/p2m.c
+++ xen-unstable.hg-4.1.22694/xen/arch/x86/mm/p2m.c
@@ -2211,12 +2211,15 @@ p2m_remove_page(struct p2m_domain *p2m,
 
     P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
 
-    for ( i = 0; i < (1UL << page_order); i++ )
+    if ( mfn_valid(_mfn(mfn)) )
     {
-        mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
-        if ( !p2m_is_grant(t) )
-            set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
-        ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
+        for ( i = 0; i < (1UL << page_order); i++ )
+        {
+            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
+            if ( !p2m_is_grant(t) )
+                set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
+            ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
+        }
     }
     set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid, p2m->default_access);
 }
@@ -2772,6 +2775,25 @@ int p2m_mem_paging_evict(struct p2m_doma
     return 0;
 }
 
+void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
+{
+    struct vcpu *v = current;
+    mem_event_request_t req;
+    struct domain *d = p2m->domain;
+
+    /* Check that there's space on the ring for this request */
+    if ( mem_event_check_ring(d) == 0 )
+    {
+        /* Send release notification to pager */
+        memset(&req, 0, sizeof(req));
+        req.flags |= MEM_EVENT_FLAG_DROP_PAGE;
+        req.gfn = gfn;
+        req.vcpu_id = v->vcpu_id;
+
+        mem_event_put_request(d, &req);
+    }
+}
+
 void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
 {
     struct vcpu *v = current;
@@ -2846,13 +2868,16 @@ void p2m_mem_paging_resume(struct p2m_do
     /* Pull the response off the ring */
     mem_event_get_response(d, &rsp);
 
-    /* Fix p2m entry */
-    mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
-    p2m_lock(p2m);
-    set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
-    set_gpfn_from_mfn(mfn_x(mfn), gfn);
-    audit_p2m(p2m, 1);
-    p2m_unlock(p2m);
+    /* Fix p2m entry if the page was not dropped */
+    if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
+    {
+        mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
+        p2m_lock(p2m);
+        set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
+        set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
+        audit_p2m(p2m, 1);
+        p2m_unlock(p2m);
+    }
 
     /* Unpause domain */
     if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
--- xen-unstable.hg-4.1.22694.orig/xen/common/memory.c
+++ xen-unstable.hg-4.1.22694/xen/common/memory.c
@@ -163,6 +163,12 @@ int guest_remove_page(struct domain *d,
 
 #ifdef CONFIG_X86
     mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(d), gmfn, &p2mt)); 
+    if ( unlikely(p2m_is_paging(p2mt)) )
+    {
+        guest_physmap_remove_page(d, gmfn, mfn, 0);
+        p2m_mem_paging_drop_page(p2m_get_hostp2m(d), gmfn);
+        return 1;
+    }
 #else
     mfn = gmfn_to_mfn(d, gmfn);
 #endif
--- xen-unstable.hg-4.1.22694.orig/xen/include/asm-x86/p2m.h
+++ xen-unstable.hg-4.1.22694/xen/include/asm-x86/p2m.h
@@ -511,6 +511,8 @@ int set_shared_p2m_entry(struct p2m_doma
 int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn);
 /* Evict a frame */
 int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn);
+/* Tell xenpaging to drop a paged out frame */
+void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn);
 /* Start populating a paged out frame */
 void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn);
 /* Prepare the p2m for paging a frame in */
@@ -518,6 +520,8 @@ int p2m_mem_paging_prep(struct p2m_domai
 /* Resume normal operation (in case a domain was paused) */
 void p2m_mem_paging_resume(struct p2m_domain *p2m);
 #else
+static inline void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
+{ }
 static inline void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
 { }
 #endif
--- xen-unstable.hg-4.1.22694.orig/xen/include/public/mem_event.h
+++ xen-unstable.hg-4.1.22694/xen/include/public/mem_event.h
@@ -33,6 +33,7 @@
 
 /* Memory event flags */
 #define MEM_EVENT_FLAG_VCPU_PAUSED  (1 << 0)
+#define MEM_EVENT_FLAG_DROP_PAGE    (1 << 1)
 
 /* Reasons for the memory event request */
 #define MEM_EVENT_REASON_UNKNOWN     0    /* typical reason */

