
[Xen-devel] [PATCH] x86 mm: free p2m pages to the shadow/hap pool.



x86 mm: free p2m pages to the shadow/hap pool.

This allows the p2m code to free and reallocate memory dynamically,
rather than only freeing everything in one go at domain teardown.
The previous mechanism (allocating p2m pages from shadow/hap memory
but freeing them directly back to the domheap) was a relic of the
rather complex pool code in the original shadow2 implementation.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
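
For anyone reading the accounting changes below: p2m pages now stay on
the shadow/hap pool's books for their whole lifetime.  A minimal sketch
of the invariant, with simplified counters rather than the real Xen
structures:

/* Simplified model of the pool accounting this patch sets up.
 * Field names mirror d->arch.paging.{shadow,hap}; everything
 * else here is illustrative, not the actual Xen code. */

struct pool_counts {
    int total_pages;    /* pages currently owned by the pool     */
    int p2m_pages;      /* pool pages lent out to the p2m tables */
};

/* Allocating a p2m page moves it out of the pool's total...     */
static void p2m_page_alloc(struct pool_counts *c)
{
    c->p2m_pages++;
    c->total_pages--;
}

/* ...and freeing it now moves it back into the pool (instead of
 * straight to the domheap), so the pool can reuse it, or release
 * it later via {sh,hap}_set_allocation(d, 0, NULL) at teardown.  */
static void p2m_page_free(struct pool_counts *c)
{
    c->p2m_pages--;
    c->total_pages++;
}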

diff -r 65010d314adb xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Mon Sep 13 17:48:19 2010 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Tue Sep 14 17:14:37 2010 +0100
@@ -329,8 +329,9 @@ static void hap_free_p2m_page(struct p2m
     /* Free should not decrement domain's total allocation, since
      * these pages were allocated without an owner. */
     page_set_owner(pg, NULL);
-    free_domheap_page(pg);
     d->arch.paging.hap.p2m_pages--;
+    d->arch.paging.hap.total_pages++;
+    hap_free(d, page_to_mfn(pg));
     ASSERT(d->arch.paging.hap.p2m_pages >= 0);
     hap_unlock(d);
 }
@@ -618,7 +619,11 @@ void hap_final_teardown(struct domain *d
         hap_teardown(d);
 
     p2m_teardown(p2m_get_hostp2m(d));
+    /* Free any memory that the p2m teardown released */
+    hap_lock(d);
+    hap_set_allocation(d, 0, NULL);
     ASSERT(d->arch.paging.hap.p2m_pages == 0);
+    hap_unlock(d);
 }
 
 void hap_teardown(struct domain *d)
diff -r 65010d314adb xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Mon Sep 13 17:48:19 2010 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Tue Sep 14 17:14:37 2010 +0100
@@ -1578,7 +1578,6 @@ void shadow_free(struct domain *d, mfn_t
 
     shadow_type = sp->u.sh.type;
     ASSERT(shadow_type != SH_type_none);
-    ASSERT(shadow_type != SH_type_p2m_table);
     ASSERT(sp->u.sh.head || (shadow_type > SH_type_max_shadow));
     pages = shadow_size(shadow_type);
 
@@ -1638,6 +1637,8 @@ shadow_alloc_p2m_page(struct p2m_domain 
  
     shadow_prealloc(d, SH_type_p2m_table, 1);
     pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
+    d->arch.paging.shadow.p2m_pages++;
+    d->arch.paging.shadow.total_pages--;
 
     shadow_unlock(d);
 
@@ -1648,8 +1649,6 @@ shadow_alloc_p2m_page(struct p2m_domain 
      * believed to be a concern. */
     page_set_owner(pg, d);
     pg->count_info |= 1;
-    d->arch.paging.shadow.p2m_pages++;
-    d->arch.paging.shadow.total_pages--;
     return pg;
 }
 
@@ -1665,12 +1664,14 @@ shadow_free_p2m_page(struct p2m_domain *
                      pg->count_info, pg->u.inuse.type_info);
     }
     pg->count_info &= ~PGC_count_mask;
-    /* Free should not decrement domain's total allocation, since 
-     * these pages were allocated without an owner. */
+    pg->u.sh.type = SH_type_p2m_table; /* p2m code reuses type-info */
     page_set_owner(pg, NULL); 
-    free_domheap_pages(pg, 0);
+
+    shadow_lock(d);
+    shadow_free(d, page_to_mfn(pg));
     d->arch.paging.shadow.p2m_pages--;
-    perfc_decr(shadow_alloc_count);
+    d->arch.paging.shadow.total_pages++;
+    shadow_unlock(d);
 }
 
 #if CONFIG_PAGING_LEVELS == 3
@@ -3115,7 +3116,7 @@ void shadow_teardown(struct domain *d)
 {
     struct vcpu *v;
     mfn_t mfn;
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    struct page_info *unpaged_pagetable = NULL;
 
     ASSERT(d->is_dying);
     ASSERT(d != current->domain);
@@ -3201,8 +3202,8 @@ void shadow_teardown(struct domain *d)
             if ( !hvm_paging_enabled(v) )
                 v->arch.guest_table = pagetable_null();
         }
-        shadow_free_p2m_page(p2m, 
-            pagetable_get_page(d->arch.paging.shadow.unpaged_pagetable));
+        unpaged_pagetable = 
+            pagetable_get_page(d->arch.paging.shadow.unpaged_pagetable);
         d->arch.paging.shadow.unpaged_pagetable = pagetable_null();
     }
 
@@ -3219,6 +3220,10 @@ void shadow_teardown(struct domain *d)
     }
 
     shadow_unlock(d);
+
+    /* Must be called outside the lock */
+    if ( unpaged_pagetable ) 
+        shadow_free_p2m_page(p2m_get_hostp2m(d), unpaged_pagetable);
 }
 
 void shadow_final_teardown(struct domain *d)
@@ -3239,13 +3244,16 @@ void shadow_final_teardown(struct domain
 
     /* It is now safe to pull down the p2m map. */
     p2m_teardown(p2m_get_hostp2m(d));
-
+    /* Free any shadow memory that the p2m teardown released */
+    shadow_lock(d);
+    sh_set_allocation(d, 0, NULL);
     SHADOW_PRINTK("dom %u final teardown done."
                    "  Shadow pages total = %u, free = %u, p2m=%u\n",
                    d->domain_id,
                    d->arch.paging.shadow.total_pages, 
                    d->arch.paging.shadow.free_pages, 
                    d->arch.paging.shadow.p2m_pages);
+    shadow_unlock(d);
 }
 
 static int shadow_one_bit_enable(struct domain *d, u32 mode)
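
One subtlety worth calling out: shadow_free_p2m_page() now takes the
shadow lock itself, which is why shadow_teardown() above stashes the
unpaged pagetable page and only frees it after dropping the lock.  A
minimal sketch of that deferred-free idiom, with hypothetical helper
names standing in for the real primitives:

/* Deferred free: the free routine acquires the lock internally, so
 * a caller already holding the lock must remember the page and free
 * it after unlocking.  All names here are illustrative, not the
 * actual Xen symbols. */

struct page_info;

void lock_acquire(void);                   /* stands in for shadow_lock()   */
void lock_drop(void);                      /* stands in for shadow_unlock() */
void free_p2m_page(struct page_info *pg);  /* takes the lock internally     */

static void teardown_example(struct page_info *pinned)
{
    struct page_info *deferred = NULL;

    lock_acquire();
    /* ... teardown work done under the lock ... */
    deferred = pinned;          /* record the page; don't free it yet */
    lock_drop();

    /* Must be called outside the lock, as in shadow_teardown(). */
    if ( deferred )
        free_p2m_page(deferred);
}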
