
[Xen-changelog] [xen-unstable] [POWERPC][XEN] Track pages correctly



# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID 7b350fc692d5eca532595da7dbdcbf7375ec78fd
# Parent  990bd509a5f53b7d13820214c9bcaec7413da14b
[POWERPC][XEN] Track pages correctly

The following patch tracks and frees pages correctly.  It solves the problem
where a foreign mapping would leave a domain as a zombie because the
reference counts on its pages were never dropped.  This involved:
  - implement relinquish_memory() for PowerPC
  - remove free_rma(), since all pages get "relinquished" now
  - get and put references on foreign pages correctly (a simplified
    sketch of this pairing follows below)
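
For illustration only (this is not part of the patch, and it uses
simplified stand-ins rather than Xen's real page_info API), here is a
minimal, single-threaded sketch of the get/put pairing being enforced:

#include <stdio.h>
#include <stdlib.h>

struct page {
    unsigned long count;            /* page is freed when this hits zero */
};

static struct page *page_alloc(void)
{
    struct page *pg = malloc(sizeof(*pg));
    pg->count = 1;                  /* the allocation holds one reference */
    return pg;
}

static void page_get(struct page *pg)
{
    pg->count++;
}

static void page_put(struct page *pg)
{
    if (--pg->count == 0) {
        printf("freeing page %p\n", (void *)pg);
        free(pg);                   /* analogous to free_domheap_page() */
    }
}

int main(void)
{
    struct page *pg = page_alloc();

    page_get(pg);    /* h_enter: a foreign mapping takes a reference */
    page_put(pg);    /* h_remove: mapping torn down, reference dropped */

    page_put(pg);    /* relinquish: drop the allocation reference; frees */

    /* If any get is not matched by a put, the final relinquish put
     * leaves count > 0, the page is never freed, and the owning domain
     * lingers as a zombie. */
    return 0;
}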

Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
Signed-off-by: Hollis Blanchard <hollisb@xxxxxxxxxx>
---
 xen/arch/powerpc/domain.c     |   36 +++++++++++++++++++++++++++++++++++-
 xen/arch/powerpc/mm.c         |    7 -------
 xen/arch/powerpc/papr/xlate.c |    5 ++++-
 xen/include/asm-powerpc/mm.h  |    2 --
 4 files changed, 39 insertions(+), 11 deletions(-)

diff -r 990bd509a5f5 -r 7b350fc692d5 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Tue Sep 12 14:28:16 2006 -0500
+++ b/xen/arch/powerpc/domain.c Wed Sep 13 18:41:11 2006 -0400
@@ -242,10 +242,44 @@ void sync_vcpu_execstate(struct vcpu *v)
     return;
 }
 
+static void relinquish_memory(struct domain *d, struct list_head *list)
+{
+    struct list_head *ent;
+    struct page_info  *page;
+
+    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
+    spin_lock_recursive(&d->page_alloc_lock);
+
+    ent = list->next;
+    while ( ent != list )
+    {
+        page = list_entry(ent, struct page_info, list);
+
+        /* Grab a reference to the page so it won't disappear from under us. */
+        if ( unlikely(!get_page(page, d)) )
+        {
+            /* Couldn't get a reference -- someone is freeing this page. */
+            ent = ent->next;
+            continue;
+        }
+        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
+            put_page_and_type(page);
+
+        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+            put_page(page);
+
+        /* Follow the list chain and /then/ potentially free the page. */
+        ent = ent->next;
+        put_page(page);
+    }
+    spin_unlock_recursive(&d->page_alloc_lock);
+}
+
 void domain_relinquish_resources(struct domain *d)
 {
-    free_rma(d);
+    relinquish_memory(d, &d->page_list);
     free_extents(d);
+    return;
 }
 
 void arch_dump_domain_info(struct domain *d)
diff -r 990bd509a5f5 -r 7b350fc692d5 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c     Tue Sep 12 14:28:16 2006 -0500
+++ b/xen/arch/powerpc/mm.c     Wed Sep 13 18:41:11 2006 -0400
@@ -329,13 +329,6 @@ int allocate_rma(struct domain *d, unsig
     return 0;
 }
 
-void free_rma(struct domain *d)
-{
-    if (d->arch.rma_page) {
-        free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
-    }
-}
-
 ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
 {
     ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
diff -r 990bd509a5f5 -r 7b350fc692d5 xen/arch/powerpc/papr/xlate.c
--- a/xen/arch/powerpc/papr/xlate.c     Tue Sep 12 14:28:16 2006 -0500
+++ b/xen/arch/powerpc/papr/xlate.c     Wed Sep 13 18:41:11 2006 -0400
@@ -263,6 +263,7 @@ static void h_enter(struct cpu_user_regs
 
                 BUG_ON(f == d);
                 get_domain(f);
+                get_page(pg, f);
             }
                 break;
             case PFN_TYPE_RMA:
@@ -510,8 +511,10 @@ static void h_remove(struct cpu_user_reg
             struct page_info *pg = mfn_to_page(mfn);
             struct domain *f = page_get_owner(pg);
 
-            if (f != d)
+            if (f != d) {
                 put_domain(f);
+                put_page(pg);
+            }
         }
     }
 
diff -r 990bd509a5f5 -r 7b350fc692d5 xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h      Tue Sep 12 14:28:16 2006 -0500
+++ b/xen/include/asm-powerpc/mm.h      Wed Sep 13 18:41:11 2006 -0400
@@ -154,7 +154,6 @@ static inline void put_page(struct page_
     while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
 
     if ( unlikely((nx & PGC_count_mask) == 0) ) {
-        panic("about to free page: 0x%lx\n", page_to_mfn(page));
         free_domheap_page(page);
     }
 }
@@ -259,7 +258,6 @@ static inline unsigned long gmfn_to_mfn(
 #define mfn_to_gmfn(_d, mfn) (mfn)
 
 extern int allocate_rma(struct domain *d, unsigned int order_pages);
-extern void free_rma(struct domain *d);
 extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
 extern void free_extents(struct domain *d);
 
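
A note on the relinquish_memory() walk in the domain.c hunk above: the
ordering is deliberate.  A temporary reference pins the page so it cannot
vanish mid-walk, the allocation reference is dropped, the list cursor is
advanced, and only then is the temporary reference dropped, which may
free the page.  The sketch below mirrors that ordering with simplified,
single-threaded stand-ins (not Xen's list_head or page_info; the
_PGT_pinned handling is omitted, and in the real code get_page() can
also fail if another CPU is already freeing the page):

#include <stdio.h>
#include <stdlib.h>

struct page {
    struct page *next;              /* stand-in for the list_head linkage */
    unsigned long count;
    int allocated;                  /* stand-in for the _PGC_allocated bit */
};

/* Drop one reference; unlink and free the page when the count hits zero. */
static void put_page(struct page **list, struct page *pg)
{
    if (--pg->count == 0) {
        struct page **pp = list;
        while (*pp != pg)           /* pg is always on the list here */
            pp = &(*pp)->next;
        *pp = pg->next;
        free(pg);                   /* analogous to free_domheap_page() */
    }
}

static void relinquish_memory(struct page **list)
{
    struct page *ent = *list;

    while (ent != NULL) {
        struct page *pg = ent;

        pg->count++;                /* get_page(): pin pg for this step */
        if (pg->allocated) {
            pg->allocated = 0;
            put_page(list, pg);     /* drop the allocation reference */
        }
        ent = ent->next;            /* follow the chain and /then/ ... */
        put_page(list, pg);         /* ... drop the pin; may free pg */
    }
}

int main(void)
{
    struct page *list = NULL;

    for (int i = 0; i < 3; i++) {
        struct page *pg = malloc(sizeof(*pg));
        pg->count = 1;              /* the allocation reference */
        pg->allocated = 1;
        pg->next = list;
        list = pg;
    }
    relinquish_memory(&list);
    printf("pages remaining: %s\n", list ? "some (leak)" : "none");
    return 0;
}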

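Likewise, the put_page() shown in the mm.h hunk drops the count with a
lock-free cmpxchg loop, and only the caller that observes the count
reaching zero frees the page.  A rough single-file equivalent using C11
atomics (COUNT_MASK here is a stand-in for Xen's PGC_count_mask):

#include <stdatomic.h>
#include <stdio.h>

#define COUNT_MASK 0x7fffffffUL         /* stand-in for PGC_count_mask */

struct page_info {
    _Atomic unsigned long count_info;   /* flag bits live above the mask */
};

static void put_page(struct page_info *page)
{
    unsigned long x = atomic_load(&page->count_info);
    unsigned long nx;

    /* Retry until the decremented count is swapped in; on failure the
     * compare-exchange reloads x with the current value for us. */
    do {
        nx = x - 1;
    } while (!atomic_compare_exchange_weak(&page->count_info, &x, nx));

    if ((nx & COUNT_MASK) == 0)
        printf("last reference gone: free_domheap_page() runs here\n");
}

int main(void)
{
    struct page_info pg;

    atomic_init(&pg.count_info, 2);
    put_page(&pg);   /* 2 -> 1: nothing happens */
    put_page(&pg);   /* 1 -> 0: the free path is taken */
    return 0;
}

Removing the panic() in that hunk is what lets pages actually reach
free_domheap_page() once the last reference goes away, instead of
treating a zero count as a fatal error.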
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

