[Xen-changelog] [xen-unstable] PoD: appropriate BUG_ON when domain is dying



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1260520948 0
# Node ID 295e77eed8c9004f3a405f7d386eee725c5ac523
# Parent  8f304c003af49d3b20aff5a6c2c963a7af696378
PoD: appropriate BUG_ON when domain is dying

The BUG_ON(d->is_dying) in p2m_pod_cache_add() introduced in c/s 20426
is not appropriate, since d->is_dying is set asynchronously. For
example, MMU_UPDATE hypercalls from qemu and the DOMCTL_destroydomain
hypercall from xend can be issued simultaneously.
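
For illustration, a rough sketch of the interleaving in question (a
simplified, hypothetical view of the two paths, not the exact call
chains or signatures):

    /* vcpu A: a qemu MMU_UPDATE hypercall hands a freed page back to
     * the PoD cache while the domain is being torn down. */
    static void mmu_update_path(struct domain *d, struct page_info *page)
    {
        /* d->is_dying may still be clear here ... */
        p2m_pod_cache_add(d, page, 0);   /* ... but the old BUG_ON() fires
                                          * if the flag flips meanwhile. */
    }

    /* vcpu B: xend's DOMCTL_destroydomain marks the domain as dying,
     * with no synchronisation against path A. */
    static void destroy_path(struct domain *d)
    {
        d->is_dying = DOMDYING_dying;
        /* ... later: relinquish resources, p2m_pod_empty_cache(), ... */
    }

Nothing orders the store to d->is_dying against the old check in
p2m_pod_cache_add(), so the BUG_ON() could trip on a perfectly legal
sequence of hypercalls.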

This patch also makes p2m_pod_empty_cache() wait, via spin_barrier() on
the p2m lock, until any PoD operation still in flight has ceased.
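
Conceptually (a hedged sketch, not the exact Xen spinlock code), the
barrier just waits until the p2m lock has been observed free, so any
PoD operation that had already taken the lock has completed before the
cache is torn down:

    /* Illustrative only: spin until the lock is seen unlocked.  Together
     * with the is_dying check performed under the p2m lock in
     * p2m_pod_demand_populate() and friends, this guarantees no PoD
     * operation is still in flight when p2m_pod_empty_cache() proceeds. */
    static void spin_barrier_sketch(spinlock_t *lock)
    {
        smp_mb();
        while ( spin_is_locked(lock) )
            cpu_relax();
        smp_mb();
    }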

Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm/p2m.c |   25 +++++++++++++++++++++----
 1 files changed, 21 insertions(+), 4 deletions(-)

diff -r 8f304c003af4 -r 295e77eed8c9 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Wed Dec 09 10:59:31 2009 +0000
+++ b/xen/arch/x86/mm/p2m.c     Fri Dec 11 08:42:28 2009 +0000
@@ -267,6 +267,8 @@ p2m_pod_cache_add(struct domain *d,
     }
 #endif
 
+    ASSERT(p2m_locked_by_me(p2md));
+
     /*
      * Pages from domain_alloc and returned by the balloon driver aren't
      * guaranteed to be zero; but by reclaiming zero pages, we implicitly
@@ -303,7 +305,9 @@ p2m_pod_cache_add(struct domain *d,
         BUG();
     }
 
-    BUG_ON(d->is_dying);
+    /* Ensure that the PoD cache has never been emptied.  
+     * This may cause "zombie domains" since the page will never be freed. */
+    BUG_ON( d->arch.relmem != RELMEM_not_started );
 
     spin_unlock(&d->page_alloc_lock);
 
@@ -501,6 +505,8 @@ p2m_pod_set_mem_target(struct domain *d,
     int ret = 0;
     unsigned long populated;
 
+    p2m_lock(p2md);
+
     /* P == B: Nothing to do. */
     if ( p2md->pod.entry_count == 0 )
         goto out;
@@ -528,6 +534,8 @@ p2m_pod_set_mem_target(struct domain *d,
     ret = p2m_pod_set_cache_target(d, pod_target);
 
 out:
+    p2m_unlock(p2md);
+
     return ret;
 }
 
@@ -536,6 +544,10 @@ p2m_pod_empty_cache(struct domain *d)
 {
     struct p2m_domain *p2md = d->arch.p2m;
     struct page_info *page;
+
+    /* After this barrier no new PoD activities can happen. */
+    BUG_ON(!d->is_dying);
+    spin_barrier(&p2md->lock);
 
     spin_lock(&d->page_alloc_lock);
 
@@ -588,7 +600,7 @@ p2m_pod_decrease_reservation(struct doma
 
     /* If we don't have any outstanding PoD entries, let things take their
      * course */
-    if ( p2md->pod.entry_count == 0 || unlikely(d->is_dying) )
+    if ( p2md->pod.entry_count == 0 )
         goto out;
 
     /* Figure out if we need to steal some freed memory for our cache */
@@ -596,6 +608,9 @@ p2m_pod_decrease_reservation(struct doma
 
     p2m_lock(p2md);
     audit_p2m(d);
+
+    if ( unlikely(d->is_dying) )
+        goto out_unlock;
 
     /* See what's in here. */
     /* FIXME: Add contiguous; query for PSE entries? */
@@ -1006,9 +1021,11 @@ p2m_pod_demand_populate(struct domain *d
     struct p2m_domain *p2md = d->arch.p2m;
     int i;
 
+    ASSERT(p2m_locked_by_me(d->arch.p2m));
+
     /* This check is done with the p2m lock held.  This will make sure that
-     * even if d->is_dying changes under our feet, empty_pod_cache() won't start
-     * until we're done. */
+     * even if d->is_dying changes under our feet, p2m_pod_empty_cache() 
+     * won't start until we're done. */
     if ( unlikely(d->is_dying) )
         goto out_fail;
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog