[Xen-devel] [PATCH 3/3] continuable destroy domain: ia64 part



# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1188280280 -32400
# Node ID b4fe65fdc26d5b56057f3711e7d8bf4a3617cfa8
# Parent  b8f3785f15bde5da96b4ffd1cde3e677d54abf90
Implement ia64 continuable domain destroy.
PATCHNAME: implement_ia64_continuable_domain_destroy

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
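
For reviewers, a note on how this plugs into the common code: the
-EAGAIN that mm_teardown() now returns is propagated by
domain_relinquish_resources(), and the domctl side of this series is
expected to turn it into a hypercall continuation so the destroy can be
resumed. The sketch below paraphrases that plumbing from patches 1-2/3
rather than quoting it; domain_kill(), hypercall_create_continuation()
and __HYPERVISOR_domctl are existing names in this tree, but the
function below and its exact call site are illustrative only.

    /* Sketch only: editor's paraphrase of the common-code side of this
     * series, not code from this patch. */
    static long destroy_with_continuation(struct domain *d,
                                          XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
    {
        long ret = domain_kill(d);  /* reaches domain_relinquish_resources() */

        if (ret == -EAGAIN)
            /* Re-enter the domctl later instead of spinning in the
             * hypervisor, so tearing down a huge page-table tree cannot
             * hog the CPU. */
            ret = hypercall_create_continuation(__HYPERVISOR_domctl,
                                                "h", u_domctl);
        return ret;
    }

Between invocations, d->arch.mm_teardown_offset records how far the
page-table walk got; mm_teardown_can_skip() then lets the next
invocation fall through the already-processed ranges quickly.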

diff -r b8f3785f15bd -r b4fe65fdc26d xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Fri Aug 31 19:52:21 2007 +0900
+++ b/xen/arch/ia64/xen/domain.c        Tue Aug 28 14:51:20 2007 +0900
@@ -563,6 +563,7 @@ int arch_domain_create(struct domain *d)
                goto fail_nomem;
 
        memset(&d->arch.mm, 0, sizeof(d->arch.mm));
+       d->arch.mm_teardown_offset = 0;
 
        if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
            goto fail_nomem;
@@ -938,12 +939,15 @@ static void relinquish_memory(struct dom
 
 int domain_relinquish_resources(struct domain *d)
 {
+    int ret;
     /* Relinquish guest resources for VT-i domain. */
     if (d->vcpu[0] && VMX_DOMAIN(d->vcpu[0]))
            vmx_relinquish_guest_resources(d);
 
     /* Tear down shadow mode stuff. */
-    mm_teardown(d);
+    ret = mm_teardown(d);
+    if (ret != 0)
+        return ret;
 
     /* Relinquish every page of memory. */
     relinquish_memory(d, &d->xenpage_list);
diff -r b8f3785f15bd -r b4fe65fdc26d xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Fri Aug 31 19:52:21 2007 +0900
+++ b/xen/arch/ia64/xen/mm.c    Tue Aug 28 14:51:20 2007 +0900
@@ -215,6 +215,18 @@ alloc_dom_xen_and_dom_io(void)
     BUG_ON(dom_io == NULL);
 }
 
+static int
+mm_teardown_can_skip(struct domain* d, unsigned long offset)
+{
+    return d->arch.mm_teardown_offset > offset;
+}
+
+static void
+mm_teardown_update_offset(struct domain* d, unsigned long offset)
+{
+    d->arch.mm_teardown_offset = offset;
+}
+
 static void
 mm_teardown_pte(struct domain* d, volatile pte_t* pte, unsigned long offset)
 {
@@ -252,46 +264,73 @@ mm_teardown_pte(struct domain* d, volati
     }
 }
 
-static void
+static int
 mm_teardown_pmd(struct domain* d, volatile pmd_t* pmd, unsigned long offset)
 {
     unsigned long i;
     volatile pte_t* pte = pte_offset_map(pmd, offset);
 
     for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
-        if (!pte_present(*pte)) // acquire semantics
+        unsigned long cur_offset = offset + (i << PAGE_SHIFT);
+        if (mm_teardown_can_skip(d, cur_offset + PAGE_SIZE))
             continue;
-        mm_teardown_pte(d, pte, offset + (i << PAGE_SHIFT));
-    }
-}
-
-static void
+        if (!pte_present(*pte)) { // acquire semantics
+            mm_teardown_update_offset(d, cur_offset);
+            continue;
+        }
+        mm_teardown_update_offset(d, cur_offset);
+        mm_teardown_pte(d, pte, cur_offset);
+        if (hypercall_preempt_check())
+            return -EAGAIN;
+    }
+    return 0;
+}
+
+static int
 mm_teardown_pud(struct domain* d, volatile pud_t *pud, unsigned long offset)
 {
     unsigned long i;
     volatile pmd_t *pmd = pmd_offset(pud, offset);
 
     for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
-        if (!pmd_present(*pmd)) // acquire semantics
+        unsigned long cur_offset = offset + (i << PMD_SHIFT);
+        if (mm_teardown_can_skip(d, cur_offset + PMD_SIZE))
             continue;
-        mm_teardown_pmd(d, pmd, offset + (i << PMD_SHIFT));
-    }
-}
-
-static void
+        if (!pmd_present(*pmd)) { // acquire semantics
+            mm_teardown_update_offset(d, cur_offset);
+            continue;
+        }
+        if (mm_teardown_pmd(d, pmd, cur_offset))
+            return -EAGAIN;
+    }
+    return 0;
+}
+
+static int
 mm_teardown_pgd(struct domain* d, volatile pgd_t *pgd, unsigned long offset)
 {
     unsigned long i;
     volatile pud_t *pud = pud_offset(pgd, offset);
 
     for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
-        if (!pud_present(*pud)) // acquire semantics
+        unsigned long cur_offset = offset + (i << PUD_SHIFT);
+#ifndef __PAGETABLE_PUD_FOLDED
+        if (mm_teardown_can_skip(d, cur_offset + PUD_SIZE))
             continue;
-        mm_teardown_pud(d, pud, offset + (i << PUD_SHIFT));
-    }
-}
-
-void
+#endif
+        if (!pud_present(*pud)) { // acquire semantics
+#ifndef __PAGETABLE_PUD_FOLDED
+            mm_teardown_update_offset(d, cur_offset);
+#endif
+            continue;
+        }
+        if (mm_teardown_pud(d, pud, cur_offset))
+            return -EAGAIN;
+    }
+    return 0;
+}
+
+int
 mm_teardown(struct domain* d)
 {
     struct mm_struct* mm = &d->arch.mm;
diff -r b8f3785f15bd -r b4fe65fdc26d xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Fri Aug 31 19:52:21 2007 +0900
+++ b/xen/include/asm-ia64/domain.h     Tue Aug 28 14:51:20 2007 +0900
@@ -171,6 +171,9 @@ struct arch_domain {
 #ifdef CONFIG_XEN_IA64_TLB_TRACK
     struct tlb_track*   tlb_track;
 #endif
+
+    /* for domctl_destroy_domain continuation */
+    unsigned long mm_teardown_offset;
 };
 #define INT_ENABLE_OFFSET(v)             \
     (sizeof(vcpu_info_t) * (v)->vcpu_id + \
diff -r b8f3785f15bd -r b4fe65fdc26d xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Fri Aug 31 19:52:21 2007 +0900
+++ b/xen/include/asm-ia64/mm.h Tue Aug 28 14:51:20 2007 +0900
@@ -417,7 +417,7 @@ extern int nr_swap_pages;
 extern int nr_swap_pages;
 
 extern void alloc_dom_xen_and_dom_io(void);
-extern void mm_teardown(struct domain* d);
+extern int mm_teardown(struct domain* d);
 extern void mm_final_teardown(struct domain* d);
 extern struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
 extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
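
A couple of reviewer's notes on the mm.c hunks (editorial reading, not
something the changeset states itself). The skip test compares the
recorded offset against the end of each range (cur_offset + PAGE_SIZE,
+ PMD_SIZE, ...) with a strict '>', so the entries nearest the recorded
offset are revisited on re-entry rather than skipped; assuming
mm_teardown_pte() clears the entries it processes, a revisited pte
simply fails the pte_present() check and costs one extra offset update.
And because mm_teardown_update_offset() runs before each pte is torn
down, the recorded offset never overtakes the walk: everything strictly
below it has already been handled. The #ifndef __PAGETABLE_PUD_FOLDED
guards confine this bookkeeping to configurations where the pud is a
real level; with the pud folded into the pgd, PUD_SIZE equals
PGDIR_SIZE, so those updates would operate at whole-pgd-entry
granularity.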

