[Xen-changelog] [xen-unstable] PoD memory 4/9: Decrease reservation



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1231152230 0
# Node ID bd33ff263e2cf2d09f9ece01831a41af79019f63
# Parent  f2ba08549466d595fd5901ad655ebe82c266753d
PoD memory 4/9: Decrease reservation

Handle the balloon driver's calls to decrease_reservation properly:
* Replace PoD entries with p2m_invalid
* Steal freed memory for the PoD cache instead of releasing it, if need be

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm/p2m.c     |  123 ++++++++++++++++++++++++++++++++++++++++++++++
 xen/common/memory.c       |    5 +
 xen/include/asm-x86/p2m.h |    7 ++
 3 files changed, 135 insertions(+)
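
For context, the contract introduced here: p2m_pod_decrease_reservation()
returns 1 when it has dealt with the whole range itself (clearing PoD
entries and, if the cache is short, stealing freed RAM), and 0 when the
generic per-page path must still run.  A caller-side sketch of that
contract (illustrative only; it mirrors the xen/common/memory.c hunk
below, with d, gpfn and order standing in for the caller's domain, base
frame and extent order, and error handling omitted):

    /* Let populate-on-demand try to handle the range first (HVM only). */
    if ( is_hvm_domain(d) &&
         p2m_pod_decrease_reservation(d, gpfn, order) )
        return;  /* PoD handled everything; nothing left to free */

    /* Otherwise fall back to removing each page individually. */
    for ( j = 0; j < (1UL << order); j++ )
        guest_remove_page(d, gpfn + j);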

diff -r f2ba08549466 -r bd33ff263e2c xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Mon Jan 05 10:43:19 2009 +0000
+++ b/xen/arch/x86/mm/p2m.c     Mon Jan 05 10:43:50 2009 +0000
@@ -253,6 +253,10 @@ p2m_next_level(struct domain *d, mfn_t *
 /*
  * Populate-on-demand functionality
  */
+static
+int set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
+                  unsigned int page_order, p2m_type_t p2mt);
+
 int
 p2m_pod_cache_add(struct domain *d,
                   struct page_info *page,
@@ -362,6 +366,125 @@ p2m_pod_empty_cache(struct domain *d)
     BUG_ON(p2md->pod.count != 0);
 
     spin_unlock(&d->page_alloc_lock);
+}
+
+/* This function is needed for two reasons:
+ * + To properly handle clearing of PoD entries
+ * + To "steal back" memory being freed for the PoD cache, rather than
+ *   releasing it.
+ *
+ * Once both of these tasks have been completed, we can return and
+ * allow decrease_reservation() to handle everything else.
+ */
+int
+p2m_pod_decrease_reservation(struct domain *d,
+                             xen_pfn_t gpfn,
+                             unsigned int order)
+{
+    struct p2m_domain *p2md = d->arch.p2m;
+    int ret=0;
+    int i;
+
+    int steal_for_cache = 0;
+    int pod = 0, nonpod = 0, ram = 0;
+
+
+    /* If we don't have any outstanding PoD entries, let things take their
+     * course */
+    if ( p2md->pod.entry_count == 0 )
+        goto out;
+
+    /* Figure out if we need to steal some freed memory for our cache */
+    steal_for_cache =  ( p2md->pod.entry_count > p2md->pod.count );
+
+    p2m_lock(p2md);
+    audit_p2m(d);
+
+    /* See what's in here. */
+    /* FIXME: Add contiguous; query for PSE entries? */
+    for ( i=0; i<(1<<order); i++)
+    {
+        p2m_type_t t;
+
+        gfn_to_mfn_query(d, gpfn + i, &t);
+
+        if ( t == p2m_populate_on_demand )
+            pod++;
+        else
+        {
+            nonpod++;
+            if ( p2m_is_ram(t) )
+                ram++;
+        }
+    }
+
+    /* No populate-on-demand?  Don't need to steal anything?  Then we're done! */
+    if ( !pod && !steal_for_cache )
+        goto out_unlock;
+
+    if ( !nonpod )
+    {
+        /* All PoD: Mark the whole region invalid and tell caller
+         * we're done. */
+        set_p2m_entry(d, gpfn, _mfn(INVALID_MFN), order, p2m_invalid);
+        p2md->pod.entry_count-=(1<<order); /* Lock: p2m */
+        BUG_ON(p2md->pod.entry_count < 0);
+        ret = 1;
+        goto out_unlock;
+    }
+
+    /* FIXME: Steal contig 2-meg regions for cache */
+
+    /* Process as long as:
+     * + There are PoD entries to handle, or
+     * + There is ram left, and we want to steal it
+     */
+    for ( i=0;
+          i<(1<<order) && (pod>0 || (steal_for_cache && ram > 0));
+          i++)
+    {
+        mfn_t mfn;
+        p2m_type_t t;
+
+        mfn = gfn_to_mfn_query(d, gpfn + i, &t);
+        if ( t == p2m_populate_on_demand )
+        {
+            set_p2m_entry(d, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
+            p2md->pod.entry_count--; /* Lock: p2m */
+            BUG_ON(p2md->pod.entry_count < 0);
+            pod--;
+        }
+        else if ( steal_for_cache && p2m_is_ram(t) )
+        {
+            struct page_info *page;
+
+            ASSERT(mfn_valid(mfn));
+
+            page = mfn_to_page(mfn);
+
+            set_p2m_entry(d, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid);
+            set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
+
+            p2m_pod_cache_add(d, page, 0);
+
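+            /* Re-check after each page stolen: once the cache can cover
+             * all outstanding PoD entries, stop stealing and let the
+             * rest of the range be freed normally. */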
+            steal_for_cache =  ( p2md->pod.entry_count > p2md->pod.count );
+
+            nonpod--;
+            ram--;
+        }
+    }
+
+    /* If there are no more non-PoD entries, tell decrease_reservation() that
+     * there's nothing left to do. */
+    if ( nonpod == 0 )
+        ret = 1;
+
+out_unlock:
+    audit_p2m(d);
+    p2m_unlock(p2md);
+
+out:
+    return ret;
 }
 
 void
diff -r f2ba08549466 -r bd33ff263e2c xen/common/memory.c
--- a/xen/common/memory.c       Mon Jan 05 10:43:19 2009 +0000
+++ b/xen/common/memory.c       Mon Jan 05 10:43:50 2009 +0000
@@ -192,6 +192,11 @@ static void decrease_reservation(struct 
         if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
             goto out;
 
+        /* See if populate-on-demand wants to handle this */
+        if ( is_hvm_domain(a->domain)
+             && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
+            continue;
+
         for ( j = 0; j < (1 << a->extent_order); j++ )
             if ( !guest_remove_page(a->domain, gmfn + j) )
                 goto out;
diff -r f2ba08549466 -r bd33ff263e2c xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Mon Jan 05 10:43:19 2009 +0000
+++ b/xen/include/asm-x86/p2m.h Mon Jan 05 10:43:50 2009 +0000
@@ -258,6 +258,13 @@ void p2m_pod_dump_data(struct domain *d)
  * (usually in preparation for domain destruction) */
 void p2m_pod_empty_cache(struct domain *d);
 
+/* Call when decreasing memory reservation to handle PoD entries properly.
+ * Will return '1' if all entries were handled and nothing more need be done. */
+int
+p2m_pod_decrease_reservation(struct domain *d,
+                             xen_pfn_t gpfn,
+                             unsigned int order);
+
 /* Add a page to a domain's p2m table */
 int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                             unsigned long mfn, unsigned int page_order, 
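
A note on the stealing heuristic above: steal_for_cache stays true exactly
while pod.entry_count (outstanding PoD entries) exceeds pod.count (pages
held in the cache).  As a worked example (numbers invented for
illustration): with 512 outstanding entries, 500 cached pages and a range
of plain RAM pages, the first 12 pages freed by the balloon driver are
recycled into the cache, after which the remainder of the range is
released normally.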
