[Xen-changelog] [xen-unstable] Implement ia64 continuable domain destroy.
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1188571597 -3600
# Node ID 7e79e7f01f3d9aed460f9af54a652294a0aa5d62
# Parent  bd59dd48e2084c5b1ec9ae8c8dfde87690a5ef1b
Implement ia64 continuable domain destroy.
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 xen/arch/ia64/xen/domain.c    |    6 ++-
 xen/arch/ia64/xen/mm.c        |   77 +++++++++++++++++++++++++++++++-----------
 xen/include/asm-ia64/domain.h |    3 +
 xen/include/asm-ia64/mm.h     |    2 -
 4 files changed, 67 insertions(+), 21 deletions(-)

diff -r bd59dd48e208 -r 7e79e7f01f3d xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c	Fri Aug 31 15:44:38 2007 +0100
+++ b/xen/arch/ia64/xen/domain.c	Fri Aug 31 15:46:37 2007 +0100
@@ -563,6 +563,7 @@ int arch_domain_create(struct domain *d)
         goto fail_nomem;
     memset(&d->arch.mm, 0, sizeof(d->arch.mm));
+    d->arch.mm_teardown_offset = 0;
 
     if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
         goto fail_nomem;
 
@@ -938,12 +939,15 @@ static void relinquish_memory(struct dom
 
 int domain_relinquish_resources(struct domain *d)
 {
+    int ret;
     /* Relinquish guest resources for VT-i domain. */
     if (d->vcpu[0] && VMX_DOMAIN(d->vcpu[0]))
         vmx_relinquish_guest_resources(d);
 
     /* Tear down shadow mode stuff. */
-    mm_teardown(d);
+    ret = mm_teardown(d);
+    if (ret != 0)
+        return ret;
 
     /* Relinquish every page of memory. */
     relinquish_memory(d, &d->xenpage_list);

diff -r bd59dd48e208 -r 7e79e7f01f3d xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c	Fri Aug 31 15:44:38 2007 +0100
+++ b/xen/arch/ia64/xen/mm.c	Fri Aug 31 15:46:37 2007 +0100
@@ -215,6 +215,18 @@ alloc_dom_xen_and_dom_io(void)
     BUG_ON(dom_io == NULL);
 }
 
+static int
+mm_teardown_can_skip(struct domain* d, unsigned long offset)
+{
+    return d->arch.mm_teardown_offset > offset;
+}
+
+static void
+mm_teardown_update_offset(struct domain* d, unsigned long offset)
+{
+    d->arch.mm_teardown_offset = offset;
+}
+
 static void
 mm_teardown_pte(struct domain* d, volatile pte_t* pte, unsigned long offset)
 {
@@ -252,46 +264,73 @@ mm_teardown_pte(struct domain* d, volati
     }
 }
 
-static void
+static int
 mm_teardown_pmd(struct domain* d, volatile pmd_t* pmd, unsigned long offset)
 {
     unsigned long i;
     volatile pte_t* pte = pte_offset_map(pmd, offset);
 
     for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
-        if (!pte_present(*pte)) // acquire semantics
+        unsigned long cur_offset = offset + (i << PAGE_SHIFT);
+        if (mm_teardown_can_skip(d, cur_offset + PAGE_SIZE))
             continue;
-        mm_teardown_pte(d, pte, offset + (i << PAGE_SHIFT));
-    }
-}
-
-static void
+        if (!pte_present(*pte)) { // acquire semantics
+            mm_teardown_update_offset(d, cur_offset);
+            continue;
+        }
+        mm_teardown_update_offset(d, cur_offset);
+        mm_teardown_pte(d, pte, cur_offset);
+        if (hypercall_preempt_check())
+            return -EAGAIN;
+    }
+    return 0;
+}
+
+static int
 mm_teardown_pud(struct domain* d, volatile pud_t *pud, unsigned long offset)
 {
     unsigned long i;
     volatile pmd_t *pmd = pmd_offset(pud, offset);
 
     for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
-        if (!pmd_present(*pmd)) // acquire semantics
+        unsigned long cur_offset = offset + (i << PMD_SHIFT);
+        if (mm_teardown_can_skip(d, cur_offset + PMD_SIZE))
             continue;
-        mm_teardown_pmd(d, pmd, offset + (i << PMD_SHIFT));
-    }
-}
-
-static void
+        if (!pmd_present(*pmd)) { // acquire semantics
+            mm_teardown_update_offset(d, cur_offset);
+            continue;
+        }
+        if (mm_teardown_pmd(d, pmd, cur_offset))
+            return -EAGAIN;
+    }
+    return 0;
+}
+
+static int
 mm_teardown_pgd(struct domain* d, volatile pgd_t *pgd, unsigned long offset)
 {
     unsigned long i;
     volatile pud_t *pud = pud_offset(pgd, offset);
 
     for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
-        if (!pud_present(*pud)) // acquire semantics
+        unsigned long cur_offset = offset + (i << PUD_SHIFT);
+#ifndef __PAGETABLE_PUD_FOLDED
+        if (mm_teardown_can_skip(d, cur_offset + PUD_SIZE))
             continue;
-        mm_teardown_pud(d, pud, offset + (i << PUD_SHIFT));
-    }
-}
-
-void
+#endif
+        if (!pud_present(*pud)) { // acquire semantics
+#ifndef __PAGETABLE_PUD_FOLDED
+            mm_teardown_update_offset(d, cur_offset);
+#endif
+            continue;
+        }
+        if (mm_teardown_pud(d, pud, cur_offset))
+            return -EAGAIN;
+    }
+    return 0;
+}
+
+int
 mm_teardown(struct domain* d)
 {
     struct mm_struct* mm = &d->arch.mm;

diff -r bd59dd48e208 -r 7e79e7f01f3d xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h	Fri Aug 31 15:44:38 2007 +0100
+++ b/xen/include/asm-ia64/domain.h	Fri Aug 31 15:46:37 2007 +0100
@@ -171,6 +171,9 @@ struct arch_domain {
 #ifdef CONFIG_XEN_IA64_TLB_TRACK
     struct tlb_track* tlb_track;
 #endif
+
+    /* for domctl_destroy_domain continuation */
+    unsigned long mm_teardown_offset;
 };
 #define INT_ENABLE_OFFSET(v) 		  \
     (sizeof(vcpu_info_t) * (v)->vcpu_id + \

diff -r bd59dd48e208 -r 7e79e7f01f3d xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h	Fri Aug 31 15:44:38 2007 +0100
+++ b/xen/include/asm-ia64/mm.h	Fri Aug 31 15:46:37 2007 +0100
@@ -417,7 +417,7 @@ extern int nr_swap_pages;
 extern int nr_swap_pages;
 
 extern void alloc_dom_xen_and_dom_io(void);
-extern void mm_teardown(struct domain* d);
+extern int mm_teardown(struct domain* d);
 extern void mm_final_teardown(struct domain* d);
 extern struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
 extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
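
The pattern the patch applies is worth spelling out: mm_teardown() records how far it has walked the page tables in d->arch.mm_teardown_offset and returns -EAGAIN whenever hypercall_preempt_check() fires, so domain_relinquish_resources() can be restarted by the toplevel continuation machinery instead of monopolizing the CPU for the whole teardown. Below is a minimal standalone sketch of that resume-from-saved-offset pattern. It is not Xen code; every name in it (fake_domain, teardown_chunk, fake_preempt_pending, NR_ENTRIES, ENTRY_SIZE) is invented for illustration, and only the skip test and offset bookkeeping mirror the patch.

#include <errno.h>
#include <stdio.h>

#define NR_ENTRIES  16          /* stand-in for PTRS_PER_PTE */
#define ENTRY_SIZE  0x4000UL    /* stand-in for PAGE_SIZE */

struct fake_domain {
    unsigned long teardown_offset;  /* mirrors d->arch.mm_teardown_offset */
};

/* Skip entries a previous invocation already finished; same strict
 * '>' test as mm_teardown_can_skip() in the patch. */
static int can_skip(struct fake_domain *d, unsigned long offset)
{
    return d->teardown_offset > offset;
}

/* Stand-in for hypercall_preempt_check(): pretend other work is
 * pending after five entries have been processed in one invocation. */
static int fake_preempt_pending(unsigned long done)
{
    return done == 5;
}

static int teardown_chunk(struct fake_domain *d)
{
    unsigned long i, done = 0;

    for (i = 0; i < NR_ENTRIES; i++) {
        unsigned long cur = i * ENTRY_SIZE;

        if (can_skip(d, cur + ENTRY_SIZE))
            continue;

        d->teardown_offset = cur;   /* remember progress first ... */
        printf("tearing down entry %lu\n", i);
        done++;

        if (fake_preempt_pending(done))
            return -EAGAIN;         /* ... then yield; caller retries */
    }
    return 0;
}

int main(void)
{
    struct fake_domain d = { .teardown_offset = 0 };
    int restarts = 0;

    /* Toplevel retry loop, playing the role the domctl continuation
     * machinery plays for domain_relinquish_resources(). */
    while (teardown_chunk(&d) == -EAGAIN)
        restarts++;

    printf("done after %d restarts\n", restarts);
    return 0;
}

As in the patch, the entry or two at the resume boundary get walked again after a restart (the saved offset points at the last entry started, not past it), so the per-entry teardown has to be safe to repeat; tearing down an already-cleared pte is a no-op in mm.c, which is what makes this cheap bookkeeping sufficient.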