
[Xen-changelog] [xen-unstable] [IA64] fix p2m table destruction
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID b39844e292f66762a02b2080c0f04b5e7bc58ead
# Parent  0114b372dfaead0a97a5bc5933dff21ac1178846
[IA64] fix p2m table destruction

Introduce delayed p2m table destruction: mm_teardown() (called from
domain_relinquish_resources()) now only clears p2m entries and drops the
page references, while the p2m page-table pages themselves are freed
later by mm_final_teardown() (called from arch_domain_destroy()). This
keeps the p2m table walkable throughout destruction, so late lookups
such as gmfn_to_mfn_foreign() from __gnttab_copy() no longer hit a NULL
pgd.

Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@xxxxxxxxxxxxxx>
Signed-off-by: Tomonari Horikoshi <t.horikoshi@xxxxxxxxxxxxxx>
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 xen/arch/ia64/xen/domain.c |   15 ++---
 xen/arch/ia64/xen/mm.c     |  118 ++++++++++++++++++++++++++++++++-------------
 xen/include/asm-ia64/mm.h  |    3 -
 3 files changed, 93 insertions(+), 43 deletions(-)
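
A minimal standalone sketch of the two-phase teardown this patch
introduces (toy names, and a flat one-level table standing in for the
real pgd/pud/pmd/pte tree): phase 1 clears entries and drops references
but keeps the table allocated, so a lookup arriving between the two
phases walks a valid, empty table instead of dereferencing a freed pgd;
phase 2 frees the table pages themselves.

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 8
#define INVALID_MFN (-1L)

/* Toy stand-in for a domain owning a one-level p2m table. */
struct toy_domain {
    long *p2m;
};

/* Phase 1 -- at relinquish time (cf. mm_teardown()): clear entries and
 * drop references, but keep the table itself allocated. */
static void toy_teardown(struct toy_domain *d)
{
    int i;
    for (i = 0; i < ENTRIES; i++)
        d->p2m[i] = 0;  /* stands in for ptep_get_and_clear() + put_page() */
}

/* A late lookup, e.g. a grant-table copy arriving during destruction
 * (cf. gmfn_to_mfn_foreign()): after phase 1 it finds "no mapping"
 * rather than touching freed memory. */
static long toy_lookup(struct toy_domain *d, int gpfn)
{
    if (d->p2m == NULL || d->p2m[gpfn] == 0)
        return INVALID_MFN;
    return d->p2m[gpfn];
}

/* Phase 2 -- at final destruction (cf. mm_final_teardown() ->
 * mm_p2m_teardown()): only now free the table pages. */
static void toy_final_teardown(struct toy_domain *d)
{
    free(d->p2m);
    d->p2m = NULL;
}

int main(void)
{
    struct toy_domain d = { calloc(ENTRIES, sizeof(long)) };
    d.p2m[3] = 42;
    toy_teardown(&d);                                 /* phase 1 */
    printf("late lookup: %ld\n", toy_lookup(&d, 3));  /* -1, not a crash */
    toy_final_teardown(&d);                           /* phase 2 */
    return 0;
}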

diff -r 0114b372dfae -r b39844e292f6 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Wed Nov 22 10:13:31 2006 -0700
+++ b/xen/arch/ia64/xen/domain.c        Mon Nov 27 09:26:15 2006 -0700
@@ -462,11 +462,10 @@ fail_nomem1:
 
 void arch_domain_destroy(struct domain *d)
 {
-       BUG_ON(d->arch.mm.pgd != NULL);
+       mm_final_teardown(d);
+
        if (d->shared_info != NULL)
            free_xenheap_pages(d->shared_info, get_order_from_shift(XSI_SHIFT));
-       if (d->arch.shadow_bitmap != NULL)
-               xfree(d->arch.shadow_bitmap);
 
        tlb_track_destroy(d);
 
@@ -613,14 +612,14 @@ static void relinquish_memory(struct dom
 
 void domain_relinquish_resources(struct domain *d)
 {
-    /* Relinquish every page of memory. */
-
-    // relase page traversing d->arch.mm.
-    relinquish_mm(d);
-
+    /* Relinquish guest resources for VT-i domain. */
     if (d->vcpu[0] && VMX_DOMAIN(d->vcpu[0]))
            vmx_relinquish_guest_resources(d);
 
+    /* Tear down shadow mode stuff. */
+    mm_teardown(d);
+
+    /* Relinquish every page of memory. */
     relinquish_memory(d, &d->xenpage_list);
     relinquish_memory(d, &d->page_list);
 
diff -r 0114b372dfae -r b39844e292f6 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Wed Nov 22 10:13:31 2006 -0700
+++ b/xen/arch/ia64/xen/mm.c    Mon Nov 27 09:26:15 2006 -0700
@@ -249,17 +249,21 @@ try_to_clear_PGC_allocate(struct domain*
 }
 
 static void
-relinquish_pte(struct domain* d, pte_t* pte)
-{
-    unsigned long mfn = pte_pfn(*pte);
+mm_teardown_pte(struct domain* d, pte_t* pte, unsigned long offset)
+{
+    pte_t old_pte;
+    unsigned long mfn;
     struct page_info* page;
 
+    old_pte = ptep_get_and_clear(&d->arch.mm, offset, pte);// acquire semantics
+    
     // vmx domain use bit[58:56] to distinguish io region from memory.
     // see vmx_build_physmap_table() in vmx_init.c
-    if (!pte_mem(*pte))
+    if (!pte_mem(old_pte))
         return;
 
     // domain might map IO space or acpi table pages. check it.
+    mfn = pte_pfn(old_pte);
     if (!mfn_valid(mfn))
         return;
     page = mfn_to_page(mfn);
@@ -272,17 +276,17 @@ relinquish_pte(struct domain* d, pte_t* 
         return;
     }
 
-    if (page_get_owner(page) == d) {
+    if (pte_pgc_allocated(old_pte)) {
+        BUG_ON(page_get_owner(page) != d);
         BUG_ON(get_gpfn_from_mfn(mfn) == INVALID_M2P_ENTRY);
         set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
-    }
-
-    try_to_clear_PGC_allocate(d, page);
+        try_to_clear_PGC_allocate(d, page);
+    }
     put_page(page);
 }
 
 static void
-relinquish_pmd(struct domain* d, pmd_t* pmd, unsigned long offset)
+mm_teardown_pmd(struct domain* d, pmd_t* pmd, unsigned long offset)
 {
     unsigned long i;
     pte_t* pte = pte_offset_map(pmd, offset);
@@ -290,14 +294,12 @@ relinquish_pmd(struct domain* d, pmd_t* 
     for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
         if (!pte_present(*pte))
             continue;
-
-        relinquish_pte(d, pte);
-    }
-    pte_free_kernel(pte_offset_map(pmd, offset));
+        mm_teardown_pte(d, pte, offset + (i << PAGE_SHIFT));
+    }
 }
 
 static void
-relinquish_pud(struct domain* d, pud_t *pud, unsigned long offset)
+mm_teardown_pud(struct domain* d, pud_t *pud, unsigned long offset)
 {
     unsigned long i;
     pmd_t *pmd = pmd_offset(pud, offset);
@@ -305,14 +307,12 @@ relinquish_pud(struct domain* d, pud_t *
     for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
         if (!pmd_present(*pmd))
             continue;
-
-        relinquish_pmd(d, pmd, offset + (i << PMD_SHIFT));
-    }
-    pmd_free(pmd_offset(pud, offset));
+        mm_teardown_pmd(d, pmd, offset + (i << PMD_SHIFT));
+    }
 }
 
 static void
-relinquish_pgd(struct domain* d, pgd_t *pgd, unsigned long offset)
+mm_teardown_pgd(struct domain* d, pgd_t *pgd, unsigned long offset)
 {
     unsigned long i;
     pud_t *pud = pud_offset(pgd, offset);
@@ -320,14 +320,12 @@ relinquish_pgd(struct domain* d, pgd_t *
     for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
         if (!pud_present(*pud))
             continue;
-
-        relinquish_pud(d, pud, offset + (i << PUD_SHIFT));
-    }
-    pud_free(pud_offset(pgd, offset));
+        mm_teardown_pud(d, pud, offset + (i << PUD_SHIFT));
+    }
 }
 
 void
-relinquish_mm(struct domain* d)
+mm_teardown(struct domain* d)
 {
     struct mm_struct* mm = &d->arch.mm;
     unsigned long i;
@@ -340,11 +338,70 @@ relinquish_mm(struct domain* d)
     for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
         if (!pgd_present(*pgd))
             continue;
-
-        relinquish_pgd(d, pgd, i << PGDIR_SHIFT);
+        mm_teardown_pgd(d, pgd, i << PGDIR_SHIFT);
+    }
+}
+
+static void
+mm_p2m_teardown_pmd(struct domain* d, pmd_t* pmd, unsigned long offset)
+{
+    pte_free_kernel(pte_offset_map(pmd, offset));
+}
+
+static void
+mm_p2m_teardown_pud(struct domain* d, pud_t *pud, unsigned long offset)
+{
+    unsigned long i;
+    pmd_t *pmd = pmd_offset(pud, offset);
+
+    for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+        if (!pmd_present(*pmd))
+            continue;
+        mm_p2m_teardown_pmd(d, pmd, offset + (i << PMD_SHIFT));
+    }
+    pmd_free(pmd_offset(pud, offset));
+}
+
+static void
+mm_p2m_teardown_pgd(struct domain* d, pgd_t *pgd, unsigned long offset)
+{
+    unsigned long i;
+    pud_t *pud = pud_offset(pgd, offset);
+
+    for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+        if (!pud_present(*pud))
+            continue;
+        mm_p2m_teardown_pud(d, pud, offset + (i << PUD_SHIFT));
+    }
+    pud_free(pud_offset(pgd, offset));
+}
+
+static void
+mm_p2m_teardown(struct domain* d)
+{
+    struct mm_struct* mm = &d->arch.mm;
+    unsigned long i;
+    pgd_t* pgd;
+
+    BUG_ON(mm->pgd == NULL);
+    pgd = pgd_offset(mm, 0);
+    for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+        if (!pgd_present(*pgd))
+            continue;
+        mm_p2m_teardown_pgd(d, pgd, i << PGDIR_SHIFT);
     }
     pgd_free(mm->pgd);
     mm->pgd = NULL;
+}
+
+void
+mm_final_teardown(struct domain* d)
+{
+    if (d->arch.shadow_bitmap != NULL) {
+        xfree(d->arch.shadow_bitmap);
+        d->arch.shadow_bitmap = NULL;
+    }
+    mm_p2m_teardown(d);
 }
 
 // stolen from share_xen_page_with_guest() in xen/arch/x86/mm.c
@@ -400,13 +457,6 @@ gmfn_to_mfn_foreign(struct domain *d, un
 {
        unsigned long pte;
 
-       // This function may be called from __gnttab_copy()
-       // during domain destruction with VNIF copy receiver.
-       // ** FIXME: This is not SMP-safe yet about p2m table. **
-       if (unlikely(d->arch.mm.pgd == NULL)) {
-               BUG();
-               return INVALID_MFN;
-       }
        pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT, NULL);
        if (!pte) {
                panic("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
@@ -1308,7 +1358,7 @@ expose_p2m_page(struct domain* d, unsign
     // pte page is allocated form xen heap.(see pte_alloc_one_kernel().)
     // so that the page has NULL page owner and it's reference count
     // is useless.
-    // see also relinquish_pte()'s page_get_owner() == NULL check.
+    // see also mm_teardown_pte()'s page_get_owner() == NULL check.
     BUG_ON(page_get_owner(page) != NULL);
 
     return __assign_domain_page(d, mpaddr, page_to_maddr(page),
diff -r 0114b372dfae -r b39844e292f6 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Wed Nov 22 10:13:31 2006 -0700
+++ b/xen/include/asm-ia64/mm.h Mon Nov 27 09:26:15 2006 -0700
@@ -422,7 +422,8 @@ extern int nr_swap_pages;
 extern int nr_swap_pages;
 
 extern void alloc_dom_xen_and_dom_io(void);
-extern void relinquish_mm(struct domain* d);
+extern void mm_teardown(struct domain* d);
+extern void mm_final_teardown(struct domain* d);
 extern struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
 extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
 extern int __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog