
[Xen-changelog] [xen-unstable] xenpaging: drop paged pages in guest_remove_page



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1294742308 0
# Node ID c5b42971234a7b898d104e8db6a23432ff5a9e30
# Parent  ca590ccc7a0bc80fa82091b242498a5df2fcbad4
xenpaging: drop paged pages in guest_remove_page

Simply drop paged-out pages in guest_remove_page(), and notify xenpaging
so it can drop its reference to the gfn. If the event ring is full, the
notification is skipped and the page remains in paged-out state in
xenpaging. This is not an issue; it just means this gfn will not be
nominated again.
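For readers skimming the patch, the pager-side contract the new flag
creates can be sketched as follows (a minimal sketch modelled on the
xenpaging.c hunk below; handle_drop() and handle_populate() are
hypothetical helpers standing in for the policy update and the
populate/resume logic):

    /* Sketch: dispatching one mem_event request in a pager's event loop.
     * A request flagged MEM_EVENT_FLAG_DROP_PAGE carries nothing to page
     * in; the pager only releases its own state for req->gfn, and no
     * response is put back on the ring for it. */
    static void dispatch_request(mem_event_request_t *req)
    {
        if ( req->flags & MEM_EVENT_FLAG_DROP_PAGE )
            handle_drop(req->gfn);    /* free the pageslot, update policy */
        else
            handle_populate(req);     /* page the data back in, respond   */
    }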

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
---
 tools/xenpaging/xenpaging.c    |   41 ++++++++++++++++++++------------
 xen/arch/x86/mm/p2m.c          |   51 ++++++++++++++++++++++++++++++-----------
 xen/common/memory.c            |    6 ++++
 xen/include/asm-x86/p2m.h      |    4 +++
 xen/include/public/mem_event.h |    1 +
 5 files changed, 74 insertions(+), 29 deletions(-)

diff -r ca590ccc7a0b -r c5b42971234a tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c       Tue Jan 11 10:37:45 2011 +0000
+++ b/tools/xenpaging/xenpaging.c       Tue Jan 11 10:38:28 2011 +0000
@@ -638,25 +638,34 @@ int main(int argc, char *argv[])
                     goto out;
                 }
                 
-                /* Populate the page */
-                rc = xenpaging_populate_page(paging, &req.gfn, fd, i);
-                if ( rc != 0 )
+                if ( req.flags & MEM_EVENT_FLAG_DROP_PAGE )
                 {
-                    ERROR("Error populating page");
-                    goto out;
+                    DPRINTF("drop_page ^ gfn %"PRIx64" pageslot %d\n", req.gfn, i);
+                    /* Notify policy of page being dropped */
+                    policy_notify_paged_in(req.gfn);
                 }
-
-                /* Prepare the response */
-                rsp.gfn = req.gfn;
-                rsp.p2mt = req.p2mt;
-                rsp.vcpu_id = req.vcpu_id;
-                rsp.flags = req.flags;
-
-                rc = xenpaging_resume_page(paging, &rsp, 1);
-                if ( rc != 0 )
+                else
                 {
-                    ERROR("Error resuming page");
-                    goto out;
+                    /* Populate the page */
+                    rc = xenpaging_populate_page(paging, &req.gfn, fd, i);
+                    if ( rc != 0 )
+                    {
+                        ERROR("Error populating page");
+                        goto out;
+                    }
+
+                    /* Prepare the response */
+                    rsp.gfn = req.gfn;
+                    rsp.p2mt = req.p2mt;
+                    rsp.vcpu_id = req.vcpu_id;
+                    rsp.flags = req.flags;
+
+                    rc = xenpaging_resume_page(paging, &rsp, 1);
+                    if ( rc != 0 )
+                    {
+                        ERROR("Error resuming page");
+                        goto out;
+                    }
                 }
 
                 /* Evict a new page to replace the one we just paged in */
diff -r ca590ccc7a0b -r c5b42971234a xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Tue Jan 11 10:37:45 2011 +0000
+++ b/xen/arch/x86/mm/p2m.c     Tue Jan 11 10:38:28 2011 +0000
@@ -2211,12 +2211,15 @@ p2m_remove_page(struct p2m_domain *p2m, 
 
     P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
 
-    for ( i = 0; i < (1UL << page_order); i++ )
-    {
-        mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
-        if ( !p2m_is_grant(t) )
-            set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
-        ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
+    if ( mfn_valid(_mfn(mfn)) )
+    {
+        for ( i = 0; i < (1UL << page_order); i++ )
+        {
+            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
+            if ( !p2m_is_grant(t) )
+                set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
+            ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
+        }
     }
     set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid, p2m->default_access);
 }
@@ -2772,6 +2775,25 @@ int p2m_mem_paging_evict(struct p2m_doma
     return 0;
 }
 
+void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
+{
+    struct vcpu *v = current;
+    mem_event_request_t req;
+    struct domain *d = p2m->domain;
+
+    /* Check that there's space on the ring for this request */
+    if ( mem_event_check_ring(d) == 0)
+    {
+        /* Send release notification to pager */
+        memset(&req, 0, sizeof(req));
+        req.flags |= MEM_EVENT_FLAG_DROP_PAGE;
+        req.gfn = gfn;
+        req.vcpu_id = v->vcpu_id;
+
+        mem_event_put_request(d, &req);
+    }
+}
+
 void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
 {
     struct vcpu *v = current;
@@ -2846,13 +2868,16 @@ void p2m_mem_paging_resume(struct p2m_do
     /* Pull the response off the ring */
     mem_event_get_response(d, &rsp);
 
-    /* Fix p2m entry */
-    mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
-    p2m_lock(p2m);
-    set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
-    set_gpfn_from_mfn(mfn_x(mfn), gfn);
-    audit_p2m(p2m, 1);
-    p2m_unlock(p2m);
+    /* Fix p2m entry if the page was not dropped */
+    if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
+    {
+        mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
+        p2m_lock(p2m);
+        set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
+        set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
+        audit_p2m(p2m, 1);
+        p2m_unlock(p2m);
+    }
 
     /* Unpause domain */
     if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff -r ca590ccc7a0b -r c5b42971234a xen/common/memory.c
--- a/xen/common/memory.c       Tue Jan 11 10:37:45 2011 +0000
+++ b/xen/common/memory.c       Tue Jan 11 10:38:28 2011 +0000
@@ -163,6 +163,12 @@ int guest_remove_page(struct domain *d, 
 
 #ifdef CONFIG_X86
     mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(d), gmfn, &p2mt)); 
+    if ( unlikely(p2m_is_paging(p2mt)) )
+    {
+        guest_physmap_remove_page(d, gmfn, mfn, 0);
+        p2m_mem_paging_drop_page(p2m_get_hostp2m(d), gmfn);
+        return 1;
+    }
 #else
     mfn = gmfn_to_mfn(d, gmfn);
 #endif
diff -r ca590ccc7a0b -r c5b42971234a xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Tue Jan 11 10:37:45 2011 +0000
+++ b/xen/include/asm-x86/p2m.h Tue Jan 11 10:38:28 2011 +0000
@@ -511,6 +511,8 @@ int p2m_mem_paging_nominate(struct p2m_d
 int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn);
 /* Evict a frame */
 int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn);
+/* Tell xenpaging to drop a paged out frame */
+void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn);
 /* Start populating a paged out frame */
 void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn);
 /* Prepare the p2m for paging a frame in */
@@ -518,6 +520,8 @@ int p2m_mem_paging_prep(struct p2m_domai
 /* Resume normal operation (in case a domain was paused) */
 void p2m_mem_paging_resume(struct p2m_domain *p2m);
 #else
+static inline void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
+{ }
 static inline void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
 { }
 #endif
diff -r ca590ccc7a0b -r c5b42971234a xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h    Tue Jan 11 10:37:45 2011 +0000
+++ b/xen/include/public/mem_event.h    Tue Jan 11 10:38:28 2011 +0000
@@ -33,6 +33,7 @@
 
 /* Memory event flags */
 #define MEM_EVENT_FLAG_VCPU_PAUSED  (1 << 0)
+#define MEM_EVENT_FLAG_DROP_PAGE    (1 << 1)
 
 /* Reasons for the memory event request */
 #define MEM_EVENT_REASON_UNKNOWN     0    /* typical reason */
