[Xen-changelog] Patch to enable the destruction of domains which have shadow mode enabled.
ChangeSet 1.1340.1.1, 2005/04/21 18:29:29+01:00, maf46@xxxxxxxxxxxxxxxxx

	Patch to enable the destruction of domains which have shadow mode
	enabled.  Also fixes some shadow mode ref counting bugs, and one bug
	in check_pte() recently introduced by the changes in page table
	accessor types/macros.

	Signed-off-by: michael.fetterman@xxxxxxxxxxxx

 arch/x86/domain.c        |   27 +++++--
 arch/x86/shadow.c        |  160 +++++++++++++++++++++++++++++++++++------------
 arch/x86/vmx.c           |   25 ++++---
 common/page_alloc.c      |    1
 include/asm-x86/shadow.h |    7 +-
 5 files changed, 162 insertions(+), 58 deletions(-)


diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c	2005-04-21 21:05:23 -04:00
+++ b/xen/arch/x86/domain.c	2005-04-21 21:05:24 -04:00
@@ -986,28 +986,41 @@
     /* Release device mappings of other domains */
     gnttab_release_dev_mappings(d->grant_table);
 
-    /* Exit shadow mode before deconstructing final guest page table. */
-    shadow_mode_disable(d);
-
     /* Drop the in-use references to page-table bases. */
     for_each_exec_domain ( d, ed )
     {
         if ( pagetable_val(ed->arch.guest_table) != 0 )
         {
-            put_page_and_type(&frame_table[
-                pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT]);
+            struct pfn_info *page =
+                &frame_table[pagetable_val(ed->arch.guest_table)>>PAGE_SHIFT];
+
+            if ( shadow_mode_enabled(d) )
+                put_page(page);
+            else
+                put_page_and_type(page);
+
             ed->arch.guest_table = mk_pagetable(0);
         }
 
         if ( pagetable_val(ed->arch.guest_table_user) != 0 )
         {
-            put_page_and_type(&frame_table[
-                pagetable_val(ed->arch.guest_table_user) >> PAGE_SHIFT]);
+            struct pfn_info *page =
+                &frame_table[pagetable_val(ed->arch.guest_table_user)
+                             >> PAGE_SHIFT];
+
+            if ( shadow_mode_enabled(d) )
+                put_page(page);
+            else
+                put_page_and_type(page);
+
             ed->arch.guest_table_user = mk_pagetable(0);
         }
 
         vmx_relinquish_resources(ed);
     }
+
+    /* Exit shadow mode before deconstructing final guest page table. */
+    shadow_mode_destroy(d);
 
     /*
      * Relinquish GDT mappings. No need for explicit unmapping of the LDT as
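A note on the domain.c hunk above: the release path is now asymmetric, and the
patch open-codes the same choice twice (for guest_table and guest_table_user).
A minimal sketch of the rule it implements -- the helper name
relinquish_pagetable_base is hypothetical, for illustration only:

    /*
     * Hypothetical helper (not in the patch): when shadow mode is enabled,
     * the guest table base presumably holds only a general reference, with
     * the shadow code doing its own type-reference accounting; otherwise
     * the domain still holds both references.
     */
    static void relinquish_pagetable_base(struct domain *d,
                                          struct pfn_info *page)
    {
        if ( shadow_mode_enabled(d) )
            put_page(page);           /* drop the general reference only */
        else
            put_page_and_type(page);  /* drop general + page-table type refs */
    }

Note also the ordering change: shadow_mode_destroy(d) now runs after these
references are dropped, where shadow_mode_disable(d) used to run before them.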
diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c	2005-04-21 21:05:24 -04:00
+++ b/xen/arch/x86/shadow.c	2005-04-21 21:05:24 -04:00
@@ -33,6 +33,7 @@
 
 static void shadow_free_snapshot(struct domain *d,
                                  struct out_of_sync_entry *entry);
 static void remove_out_of_sync_entries(struct domain *d, unsigned long smfn);
+static void free_writable_pte_predictions(struct domain *d);
 
 /********
@@ -554,18 +555,22 @@
     for_each_exec_domain(d, ed)
     {
         l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
-        l2_pgentry_t hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
-        l2_pgentry_t smfn = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
 
-        if ( l2e_get_flags(hl2e) & _PAGE_PRESENT )
+        if ( mpl2e )
         {
-            put_shadow_ref(l2e_get_pfn(hl2e));
-            mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty();
-        }
-        if ( l2e_get_flags(smfn) & _PAGE_PRESENT )
-        {
-            put_shadow_ref(l2e_get_pfn(smfn));
-            mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
+            l2_pgentry_t hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
+            l2_pgentry_t smfn = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
+
+            if ( l2e_get_flags(hl2e) & _PAGE_PRESENT )
+            {
+                put_shadow_ref(l2e_get_pfn(hl2e));
+                mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty();
+            }
+            if ( l2e_get_flags(smfn) & _PAGE_PRESENT )
+            {
+                put_shadow_ref(l2e_get_pfn(smfn));
+                mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
+            }
         }
     }
 }
@@ -586,12 +591,11 @@
         unsigned long *mfn_list;
 
         /* Skip empty buckets. */
-        x = &d->arch.shadow_ht[i];
-        if ( x->gpfn_and_flags == 0 )
+        if ( d->arch.shadow_ht[i].gpfn_and_flags == 0 )
             continue;
 
         count = 0;
-        for ( ; x != NULL; x = x->next )
+        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next )
             if ( PINNED(x->smfn) )
                 count++;
         if ( !count )
@@ -675,14 +679,13 @@
     unsigned long mfn;
 
     ASSERT( pagetable_val(ed->arch.monitor_table) );
-    ASSERT( shadow_mode_external(ed->domain) );
 
     mpl2e = ed->arch.monitor_vtable;
 
     /*
      * First get the mfn for hl2_table by looking at monitor_table
      */
-    hl2e = mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT];
+    hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
     if ( l2e_get_flags(hl2e) & _PAGE_PRESENT )
     {
         mfn = l2e_get_pfn(hl2e);
@@ -690,7 +693,7 @@
         put_shadow_ref(mfn);
     }
 
-    sl2e = mpl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT];
+    sl2e = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
     if ( l2e_get_flags(sl2e) & _PAGE_PRESENT )
     {
         mfn = l2e_get_pfn(sl2e);
@@ -1108,6 +1111,34 @@
                d->arch.out_of_sync_extras_count);
 }
 
+void shadow_mode_destroy(struct domain *d)
+{
+    shadow_lock(d);
+
+    free_shadow_pages(d);
+    free_writable_pte_predictions(d);
+
+#ifndef NDEBUG
+    int i;
+    for ( i = 0; i < shadow_ht_buckets; i++ )
+    {
+        if ( d->arch.shadow_ht[i].gpfn_and_flags != 0 )
+        {
+            printk("%s: d->arch.shadow_ht[%x].gpfn_and_flags=%lx\n",
+                   __FILE__, i, d->arch.shadow_ht[i].gpfn_and_flags);
+            BUG();
+        }
+    }
+#endif
+
+    d->arch.shadow_mode = 0;
+
+    free_shadow_ht_entries(d);
+    free_out_of_sync_entries(d);
+
+    shadow_unlock(d);
+}
+
 void __shadow_mode_disable(struct domain *d)
 {
     // This needs rethinking for the full shadow mode stuff.
@@ -1914,6 +1945,42 @@
     }
 }
 
+static void
+free_writable_pte_predictions(struct domain *d)
+{
+    int i;
+    struct shadow_status *x;
+
+    for ( i = 0; i < shadow_ht_buckets; i++ )
+    {
+        u32 count;
+        unsigned long *gpfn_list;
+
+        /* Skip empty buckets. */
+        if ( d->arch.shadow_ht[i].gpfn_and_flags == 0 )
+            continue;
+
+        count = 0;
+        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next )
+            if ( (x->gpfn_and_flags & PGT_type_mask) == PGT_writable_pred )
+                count++;
+
+        gpfn_list = xmalloc_array(unsigned long, count);
+        count = 0;
+        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next )
+            if ( (x->gpfn_and_flags & PGT_type_mask) == PGT_writable_pred )
+                gpfn_list[count++] = x->gpfn_and_flags & PGT_mfn_mask;
+
+        while ( count )
+        {
+            count--;
+            delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred);
+        }
+
+        xfree(gpfn_list);
+    }
+}
+
 static u32 remove_all_write_access_in_ptpage(
     struct domain *d, unsigned long pt_pfn, unsigned long pt_mfn,
     unsigned long readonly_gpfn, unsigned long readonly_gmfn,
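An aside on free_writable_pte_predictions() above: each bucket is walked
twice -- once to count the PGT_writable_pred entries, once to copy their
gpfns into a scratch array -- because delete_shadow_status() unlinks entries
from the hash chain and would invalidate the iteration if called mid-walk.
One caveat worth flagging: xmalloc_array() can fail, and the hunk
dereferences the result unchecked. A defensive variant of that middle step
might look like the fragment below; the skip-on-failure policy is an
assumption, not part of the patch:

        gpfn_list = xmalloc_array(unsigned long, count);
        if ( gpfn_list == NULL )
            continue;  /* assumed policy: the predictions are only a
                          heuristic, and free_shadow_ht_entries() still
                          reclaims the hash entries later in teardown */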
@@ -2606,30 +2673,39 @@
 char * sh_check_name;
 int shadow_status_noswap;
 
-#define v2m(adr) ({                                                        \
-    unsigned long _a = (unsigned long)(adr);                               \
-    l1_pgentry_t _pte = shadow_linear_pg_table[_a >> PAGE_SHIFT];          \
-    unsigned long _pa = l1e_get_phys(_pte);                                \
-    _pa | (_a & ~PAGE_MASK);                                               \
+#define v2m(_ed, _adr) ({                                                  \
+    unsigned long _a = (unsigned long)(_adr);                              \
+    l2_pgentry_t _pde = shadow_linear_l2_table(_ed)[l2_table_offset(_a)];  \
+    unsigned long _pa = -1;                                                \
+    if ( l2e_get_flags(_pde) & _PAGE_PRESENT )                             \
+    {                                                                      \
+        l1_pgentry_t _pte;                                                 \
+        _pte = shadow_linear_pg_table[l1_linear_offset(_a)];               \
+        if ( l1e_get_flags(_pte) & _PAGE_PRESENT )                         \
+            _pa = l1e_get_phys(_pte);                                      \
+    }                                                                      \
+    _pa | (_a & ~PAGE_MASK);                                               \
 })
 
 #define FAIL(_f, _a...)                                                    \
     do {                                                                   \
-        printk("XXX %s-FAIL (%d,%d,%d)" _f "\n"                            \
-               "g=%08lx s=%08lx &g=%08lx &s=%08lx"                         \
+        printk("XXX %s-FAIL (%d,%d,%d)" _f " at %s(%d)\n",                 \
+               sh_check_name, level, l2_idx, l1_idx, ## _a,                \
+               __FILE__, __LINE__);                                        \
+        printk("g=%08lx s=%08lx &g=%08lx &s=%08lx"                         \
                " v2m(&g)=%08lx v2m(&s)=%08lx ea=%08lx\n",                  \
-               sh_check_name, level, l2_idx, l1_idx, ## _a ,               \
                gpte, spte, pgpte, pspte,                                   \
-               v2m(pgpte), v2m(pspte),                                     \
+               v2m(ed, pgpte), v2m(ed, pspte),                             \
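The v2m() rewrite above appears to be the check_pte() fix the changeset
description mentions: the old macro indexed shadow_linear_pg_table
unconditionally and could itself fault inside the consistency checker when
the linear L1 mapping was absent. The new macro probes the shadow L2 entry
first and yields an all-ones sentinel when no translation exists. A rough
functional equivalent, written as a function purely for readability
(v2m_sketch does not exist in the tree):

    /* Illustration only: what v2m(ed, adr) computes after this patch. */
    static unsigned long v2m_sketch(struct exec_domain *ed, void *adr)
    {
        unsigned long a  = (unsigned long)adr;
        unsigned long pa = -1;  /* all-ones => no mapping */
        l2_pgentry_t pde = shadow_linear_l2_table(ed)[l2_table_offset(a)];

        /* Probe the L2 entry first so the checker cannot fault on an
           unmapped linear L1 page. */
        if ( l2e_get_flags(pde) & _PAGE_PRESENT )
        {
            l1_pgentry_t pte = shadow_linear_pg_table[l1_linear_offset(a)];
            if ( l1e_get_flags(pte) & _PAGE_PRESENT )
                pa = l1e_get_phys(pte);
        }

        return pa | (a & ~PAGE_MASK);  /* machine address, or the all-ones
                                          sentinel when unmapped */
    }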
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog