[Xen-changelog] Now booting VT-X domain with new shadow code.
ChangeSet 1.1293.1.1, 2005/04/14 21:51:32+01:00, maf46@xxxxxxxxxxxxxxxxx

    Now booting VT-X domain with new shadow code.

    Signed-off-by: michael.fetterman@xxxxxxxxxxxx

 arch/x86/audit.c           |    8 -
 arch/x86/shadow.c          |  198 ++++++++++++++++++++++++++++++---------------
 arch/x86/traps.c           |    3
 arch/x86/vmx.c             |    3
 include/asm-x86/regs.h     |    6 +
 include/asm-x86/shadow.h   |   55 +++---------
 include/asm-x86/vmx_vmcs.h |    2
 7 files changed, 160 insertions(+), 115 deletions(-)


diff -Nru a/xen/arch/x86/audit.c b/xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c  2005-04-14 18:03:49 -04:00
+++ b/xen/arch/x86/audit.c  2005-04-14 18:03:49 -04:00
@@ -35,7 +35,6 @@
 static int l1, l2, oos_count, page_count;

 #define FILE_AND_LINE 0
-//#define MFN2_TO_WATCH 0x1d8

 #if FILE_AND_LINE
 #define adjust(_p, _a) _adjust((_p), (_a), __FILE__, __LINE__)
@@ -55,13 +54,6 @@
 void _adjust(struct pfn_info *page, int adjtype ADJUST_EXTRA_ARGS)
 {
-#ifdef MFN2_TO_WATCH
-    if (page_to_pfn(page) == MFN2_TO_WATCH)
-    {
-        APRINTK("adjust(mfn=%p, dir=%d, adjtype=%d)",
-                page_to_pfn(page), dir, adjtype);
-    }
-#endif

     if ( adjtype )
     {
         // adjust the type count
diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c  2005-04-14 18:03:49 -04:00
+++ b/xen/arch/x86/shadow.c  2005-04-14 18:03:49 -04:00
@@ -68,9 +68,14 @@
     if ( !shadow_remove_all_write_access(d, gpfn, gmfn) )
     {
-        FSH_LOG("%s: couldn't find/remove all write accesses, gpfn=%p gmfn=%p\n",
+        FSH_LOG("%s: couldn't find/remove all write accesses, gpfn=%p gmfn=%p",
                 __func__, gpfn, gmfn);
+#if 1 || defined(LIVE_DANGEROUSLY)
+        set_bit(_PGC_page_table, &page->count_info);
+        return 1;
+#endif
         return 0;
+    }

     // To convert this page to use as a page table, the writable count
@@ -288,7 +293,7 @@
     return smfn;

   fail:
-    FSH_LOG("promotion of pfn=%p mfn=%p failed! external gnttab refs?\n",
+    FSH_LOG("promotion of pfn=%p mfn=%p failed! external gnttab refs?",
            gpfn, gmfn);
     free_domheap_page(page);
     return 0;
@@ -311,9 +316,6 @@
     }
     unmap_domain_mem(pl1e);
-
-    list_add(&spage->list, &d->arch.free_shadow_frames);
-    perfc_incr(free_l1_pages);
 }

 static void inline
@@ -322,6 +324,8 @@
     l1_pgentry_t *hl2 = map_domain_mem(smfn << PAGE_SHIFT);
     int i, limit;

+    SH_VVLOG("%s: smfn=%p freed\n", __func__, smfn);
+
 #ifdef __i386__
     if ( shadow_mode_external(d) )
         limit = L2_PAGETABLE_ENTRIES;
@@ -353,8 +357,7 @@
             put_shadow_ref(pl2e[i] >> PAGE_SHIFT);

     if ( (PGT_base_page_table == PGT_l2_page_table) &&
-         shadow_mode_translate(d) &&
-         !shadow_mode_external(d) )
+         shadow_mode_translate(d) && !external )
     {
         // free the ref to the hl2
         //
@@ -373,6 +376,8 @@
     unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
     unsigned long type = page->u.inuse.type_info & PGT_type_mask;

+    SH_VVLOG("%s: free'ing smfn=%p", __func__, smfn);
+
     ASSERT( ! IS_INVALID_M2P_ENTRY(gpfn) );

     delete_shadow_status(d, gpfn, gmfn, type);
@@ -414,7 +419,12 @@
     page->tlbflush_timestamp = 0;
     page->u.free.cpu_mask = 0;

-    if ( type != PGT_l1_shadow )
+    if ( type == PGT_l1_shadow )
+    {
+        list_add(&page->list, &d->arch.free_shadow_frames);
+        perfc_incr(free_l1_pages);
+    }
+    else
         free_domheap_page(page);
 }

@@ -510,9 +520,9 @@

 static void free_shadow_pages(struct domain *d)
 {
-    int i, free = 0;
-    struct shadow_status *x, *n;
-    struct exec_domain *e;
+    int i;
+    struct shadow_status *x;
+    struct exec_domain *ed;

     /*
      * WARNING! The shadow page table must not currently be in use!
@@ -529,58 +539,81 @@

     // second, remove any outstanding refs from ed->arch.shadow_table...
     //
-    for_each_exec_domain(d, e)
+    for_each_exec_domain(d, ed)
     {
-        if ( pagetable_val(e->arch.shadow_table) )
+        if ( pagetable_val(ed->arch.shadow_table) )
         {
-            put_shadow_ref(pagetable_val(e->arch.shadow_table) >> PAGE_SHIFT);
-            e->arch.shadow_table = mk_pagetable(0);
+            put_shadow_ref(pagetable_val(ed->arch.shadow_table) >> PAGE_SHIFT);
+            ed->arch.shadow_table = mk_pagetable(0);
+        }
+    }
+
+    // For external shadows, remove the monitor table's refs
+    //
+    if ( shadow_mode_external(d) )
+    {
+        for_each_exec_domain(d, ed)
+        {
+            l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
+            l2_pgentry_t hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
+            l2_pgentry_t smfn = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
+            if ( l2_pgentry_val(hl2e) & _PAGE_PRESENT )
+            {
+                put_shadow_ref(l2_pgentry_val(hl2e) >> PAGE_SHIFT);
+                mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
+            }
+            if ( l2_pgentry_val(smfn) & _PAGE_PRESENT )
+            {
+                put_shadow_ref(l2_pgentry_val(smfn) >> PAGE_SHIFT);
+                mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
+            }
         }
     }

     // Now, the only refs to shadow pages that are left are from the shadow
-    // pages themselves.  We can just free them.
+    // pages themselves.  We just unpin the pinned pages, and the rest
+    // should automatically disappear.
     //
+    // NB: Beware: each explicitly or implicit call to free_shadow_page
+    // can/will result in the hash bucket getting rewritten out from
+    // under us...  First, collect the list of pinned pages, then
+    // free them.
+    //
+#define PINNED(_x) (frame_table[_x].u.inuse.type_info & PGT_pinned)
     for ( i = 0; i < shadow_ht_buckets; i++ )
     {
+        u32 count;
+        unsigned long *mfn_list;
+
         /* Skip empty buckets. */
         x = &d->arch.shadow_ht[i];
         if ( x->gpfn_and_flags == 0 )
             continue;

-        /* Free the head page. */
-        free_shadow_page(x->smfn);
-
-        /* Reinitialise the head node. */
-        x->gpfn_and_flags = 0;
-        x->smfn = 0;
-        n = x->next;
-        x->next = NULL;
-
-        free++;
-
-        /* Iterate over non-head nodes. */
-        for ( x = n; x != NULL; x = n )
-        {
-            /* Free the shadow page. */
-            free_shadow_page(x->smfn);
-
-            /* Re-initialise the chain node. */
-            x->gpfn_and_flags = 0;
-            x->smfn = 0;
+        count = 0;
+        for ( ; x != NULL; x = x->next )
+            if ( PINNED(x->smfn) )
+                count++;
+        if ( !count )
+            continue;

-            /* Add to the free list. */
-            n = x->next;
-            x->next = d->arch.shadow_ht_free;
-            d->arch.shadow_ht_free = x;
+        mfn_list = xmalloc_array(unsigned long, count);
+        count = 0;
+        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next )
+            if ( PINNED(x->smfn) )
+                mfn_list[count++] = x->smfn;

-            free++;
+        while ( count )
+        {
+            shadow_unpin(mfn_list[--count]);
         }
-
-        shadow_audit(d, 0);
+        xfree(mfn_list);
     }
+#undef PINNED
+
+    shadow_audit(d, 0);

-    SH_LOG("Free shadow table. Freed=%d.", free);
+    SH_LOG("Free shadow table.");
 }

 void shadow_mode_init(void)
@@ -622,6 +655,12 @@
     mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
         mk_l2_pgentry(pagetable_val(d->arch.phys_table) | __PAGE_HYPERVISOR);

+    // Don't (yet) have mappings for these...
+    // Don't want to accidentally see the idle_pg_table's linear mapping.
+    //
+    mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
+    mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
+
     ed->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
     ed->arch.monitor_vtable = mpl2e;
 }
@@ -631,7 +670,7 @@
  */
 void free_monitor_pagetable(struct exec_domain *ed)
 {
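
A note on the free_shadow_pages() rewrite above: as the new comment in the diff warns, each call to free_shadow_page() can rewrite the hash bucket being walked, so the loop first snapshots the pinned shadow MFNs into a private array and only unpins them afterwards. The standalone sketch below illustrates that collect-then-release pattern in isolation; it is not Xen code, and the types and helpers used here (struct entry, release_pinned, unpin_mfn) are invented for the example.

/* Illustrative only -- not Xen code.  Mimics the two-pass pattern used by
 * free_shadow_pages() above: releasing an entry may rewrite the bucket
 * under the iterator, so candidates are collected first, then released. */
#include <stdio.h>
#include <stdlib.h>

struct entry {
    unsigned long smfn;     /* stand-in for a shadow page's MFN */
    int pinned;             /* stand-in for the PGT_pinned type bit */
    struct entry *next;     /* singly-linked hash chain */
};

/* Stand-in for shadow_unpin(): in the real code dropping the pin may free
 * the shadow page and rewrite the hash chain, which is why it must not be
 * called while the chain is still being walked. */
static void unpin_mfn(unsigned long smfn)
{
    printf("unpinning smfn %#lx\n", smfn);
}

static void release_pinned(struct entry *bucket)
{
    unsigned long *mfn_list;
    unsigned int count = 0;
    struct entry *e;

    /* Pass 1: count the pinned entries in this bucket. */
    for ( e = bucket; e != NULL; e = e->next )
        if ( e->pinned )
            count++;
    if ( count == 0 )
        return;

    /* Pass 2: snapshot their MFNs before mutating anything. */
    if ( (mfn_list = malloc(count * sizeof(*mfn_list))) == NULL )
        return;
    count = 0;
    for ( e = bucket; e != NULL; e = e->next )
        if ( e->pinned )
            mfn_list[count++] = e->smfn;

    /* Pass 3: now it is safe to drop the pins. */
    while ( count )
        unpin_mfn(mfn_list[--count]);

    free(mfn_list);
}

int main(void)
{
    struct entry c = { 0x300, 1, NULL };
    struct entry b = { 0x200, 0, &c };
    struct entry a = { 0x100, 1, &b };

    release_pinned(&a);    /* unpins 0x300, then 0x100; skips 0x200 */
    return 0;
}

Snapshotting first costs a small allocation per bucket, but it keeps the traversal independent of whatever the release path does to the chain.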
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog