[Xen-changelog] [xen-unstable] x86 shadow: for multi-page shadows, explicitly track the first page
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1283336627 -3600
# Node ID 14fcf5b873445880a1c7ab2a63ac83b5351d6d80
# Parent  ae0cd4e5cc0149ceb5d3dd61a003aadc008f036e
x86 shadow: for multi-page shadows, explicitly track the first page
(where the refcounts are) and check that none of the routines that do
refcounting ever see the second, third or fourth page.

This is just stating and enforcing an existing implicit requirement.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
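[Editorial sketch, not part of the patch: the invariant being enforced is easy to see in miniature. The code below is a standalone toy model, not Xen code; the toy_* names and the simplified bitfield are illustrative stand-ins for struct page_info's u.sh fields and for shadow_alloc()/sh_get_ref(), and it drops the real patch's gating of the head bit on SH_type_min_shadow..SH_type_max_shadow. An order-2 shadow spans four pages; only page 0 (the head, where the refcounts live) may ever reach a refcounting routine.]

    #include <assert.h>
    #include <stdio.h>

    /* Toy model of the u.sh bitfield after this patch. */
    struct toy_shadow_page {
        unsigned int type  : 5;  /* what kind of shadow this is */
        unsigned int pinned: 1;  /* is the shadow pinned? */
        unsigned int head  : 1;  /* first page of a multi-page shadow? */
        unsigned int count : 25; /* refcount: meaningful on the head only */
    };

    /* Allocate an order-N shadow: mark only page 0 as the head. */
    static void toy_shadow_alloc(struct toy_shadow_page *sp, int order,
                                 unsigned int type)
    {
        for ( int i = 0; i < (1 << order); i++ )
        {
            sp[i].type = type;
            sp[i].pinned = 0;
            sp[i].count = 0;
            sp[i].head = (i == 0); /* the refcounts live on this page */
        }
    }

    /* Refcounting routines must only ever see the head page. */
    static void toy_get_ref(struct toy_shadow_page *sp)
    {
        assert(sp->head); /* mirrors the new ASSERT(sp->u.sh.head) */
        sp->count++;
    }

    int main(void)
    {
        struct toy_shadow_page shadow[4] = { { 0 } };

        toy_shadow_alloc(shadow, 2, 1 /* arbitrary toy type */);
        toy_get_ref(&shadow[0]);       /* fine: this is the head page */
        /* toy_get_ref(&shadow[1]); */ /* second page: would trip the assert */
        printf("head refcount: %u\n", shadow[0].count);
        return 0;
    }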
---
 xen/arch/x86/mm/shadow/common.c  |    8 +++++-
 xen/arch/x86/mm/shadow/multi.c   |   48 +++++++++++++++++++++++++++++----------
 xen/arch/x86/mm/shadow/private.h |    2 +
 xen/include/asm-x86/mm.h         |    3 +-
 4 files changed, 47 insertions(+), 14 deletions(-)

diff -r ae0cd4e5cc01 -r 14fcf5b87344 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c  Wed Sep 01 10:19:14 2010 +0100
+++ b/xen/arch/x86/mm/shadow/common.c  Wed Sep 01 11:23:47 2010 +0100
@@ -1593,6 +1593,9 @@ mfn_t shadow_alloc(struct domain *d,
         sp[i].u.sh.type = shadow_type;
         sp[i].u.sh.pinned = 0;
         sp[i].u.sh.count = 0;
+        sp[i].u.sh.head = ( shadow_type >= SH_type_min_shadow
+                            && shadow_type <= SH_type_max_shadow
+                            && i == 0 );
         sp[i].v.sh.back = backpointer;
         set_next_shadow(&sp[i], NULL);
         perfc_incr(shadow_alloc_count);
@@ -1616,6 +1619,7 @@ void shadow_free(struct domain *d, mfn_t
     shadow_type = sp->u.sh.type;
     ASSERT(shadow_type != SH_type_none);
     ASSERT(shadow_type != SH_type_p2m_table);
+    ASSERT(sp->u.sh.head || (shadow_type > SH_type_max_shadow));
     order = shadow_order(shadow_type);
 
     d->arch.paging.shadow.free_pages += 1 << order;
@@ -1637,7 +1641,7 @@ void shadow_free(struct domain *d, mfn_t
         }
 #endif
         /* Strip out the type: this is now a free shadow page */
-        sp[i].u.sh.type = 0;
+        sp[i].u.sh.type = sp[i].u.sh.head = 0;
         /* Remember the TLB timestamp so we will know whether to flush
          * TLBs when we reuse the page.  Because the destructors leave the
          * contents of the pages in place, we can delay TLB flushes until
@@ -1941,6 +1945,8 @@ static void sh_hash_audit_bucket(struct
             /* Bogus type? */
             BUG_ON( sp->u.sh.type == 0 );
             BUG_ON( sp->u.sh.type > SH_type_max_shadow );
+            /* Wrong page of a multi-page shadow? */
+            BUG_ON( !sp->u.sh.head );
             /* Wrong bucket? */
             BUG_ON( sh_hash(__backpointer(sp), sp->u.sh.type) != bucket );
             /* Duplicate entry? */
diff -r ae0cd4e5cc01 -r 14fcf5b87344 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c  Wed Sep 01 10:19:14 2010 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c  Wed Sep 01 11:23:47 2010 +0100
@@ -94,6 +94,7 @@ get_fl1_shadow_status(struct vcpu *v, gf
 /* Look for FL1 shadows in the hash table */
 {
     mfn_t smfn = shadow_hash_lookup(v, gfn_x(gfn), SH_type_fl1_shadow);
+    ASSERT(!mfn_valid(smfn) || mfn_to_page(smfn)->u.sh.head);
     return smfn;
 }
 
@@ -102,6 +103,7 @@ get_shadow_status(struct vcpu *v, mfn_t 
 /* Look for shadows in the hash table */
 {
     mfn_t smfn = shadow_hash_lookup(v, mfn_x(gmfn), shadow_type);
+    ASSERT(!mfn_valid(smfn) || mfn_to_page(smfn)->u.sh.head);
     perfc_incr(shadow_get_shadow_status);
     return smfn;
 }
@@ -113,6 +115,7 @@ set_fl1_shadow_status(struct vcpu *v, gf
     SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
                   gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
+    ASSERT(mfn_to_page(smfn)->u.sh.head);
     shadow_hash_insert(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
 }
 
@@ -127,6 +130,8 @@ set_shadow_status(struct vcpu *v, mfn_t 
                    d->domain_id, v->vcpu_id, mfn_x(gmfn),
                    shadow_type, mfn_x(smfn));
 
+    ASSERT(mfn_to_page(smfn)->u.sh.head);
+
     /* 32-on-64 PV guests don't own their l4 pages so can't get_page them */
     if ( !is_pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
     {
@@ -143,6 +148,7 @@ delete_fl1_shadow_status(struct vcpu *v,
 {
     SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
                   gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
+    ASSERT(mfn_to_page(smfn)->u.sh.head);
     shadow_hash_delete(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
 }
 
@@ -153,6 +159,7 @@ delete_shadow_status(struct vcpu *v, mfn
     SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx, type=%08x, smfn=%05lx\n",
                   v->domain->domain_id, v->vcpu_id,
                   mfn_x(gmfn), shadow_type, mfn_x(smfn));
+    ASSERT(mfn_to_page(smfn)->u.sh.head);
     shadow_hash_delete(v, mfn_x(gmfn), shadow_type, smfn);
     /* 32-on-64 PV guests don't own their l4 pages; see set_shadow_status */
     if ( !is_pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
@@ -432,6 +439,7 @@ shadow_l1_index(mfn_t *smfn, u32 guest_i
 shadow_l1_index(mfn_t *smfn, u32 guest_index)
 {
 #if (GUEST_PAGING_LEVELS == 2)
+    ASSERT(mfn_to_page(*smfn)->u.sh.head);
     *smfn = _mfn(mfn_x(*smfn) +
                  (guest_index / SHADOW_L1_PAGETABLE_ENTRIES));
     return (guest_index % SHADOW_L1_PAGETABLE_ENTRIES);
@@ -444,6 +452,7 @@ shadow_l2_index(mfn_t *smfn, u32 guest_i
 shadow_l2_index(mfn_t *smfn, u32 guest_index)
 {
 #if (GUEST_PAGING_LEVELS == 2)
+    ASSERT(mfn_to_page(*smfn)->u.sh.head);
     // Because we use 2 shadow l2 entries for each guest entry, the number of
     // guest entries per shadow page is SHADOW_L2_PAGETABLE_ENTRIES/2
     //
@@ -1023,6 +1032,7 @@ static int shadow_set_l2e(struct vcpu *v
     if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT )
     {
         mfn_t sl1mfn = shadow_l2e_get_mfn(new_sl2e);
+        ASSERT(mfn_to_page(sl1mfn)->u.sh.head);
 
         /* About to install a new reference */
         if ( !sh_get_ref(v, sl1mfn, paddr) )
@@ -1033,13 +1043,15 @@ static int shadow_set_l2e(struct vcpu *v
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
         {
             struct page_info *sp = mfn_to_page(sl1mfn);
-            mfn_t gl1mfn = backpointer(sp);
-
+            mfn_t gl1mfn;
+
+            ASSERT(sp->u.sh.head);
+            gl1mfn = backpointer(sp);
             /* If the shadow is a fl1 then the backpointer contains
                the GFN instead of the GMFN, and it's definitely not
               OOS. */
             if ( (sp->u.sh.type != SH_type_fl1_shadow) && mfn_valid(gl1mfn)
-                 && mfn_is_out_of_sync(gl1mfn) )
+                && mfn_is_out_of_sync(gl1mfn) )
                 sh_resync(v, gl1mfn);
         }
 #endif
@@ -1993,15 +2005,17 @@ void sh_destroy_l4_shadow(struct vcpu *v
 void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
 {
     shadow_l4e_t *sl4e;
-    u32 t = mfn_to_page(smfn)->u.sh.type;
+    struct page_info *sp = mfn_to_page(smfn);
+    u32 t = sp->u.sh.type;
     mfn_t gmfn, sl4mfn;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
                   "%s(%05lx)\n", __func__, mfn_x(smfn));
     ASSERT(t == SH_type_l4_shadow);
+    ASSERT(sp->u.sh.head);
 
     /* Record that the guest page isn't shadowed any more (in this type) */
-    gmfn = backpointer(mfn_to_page(smfn));
+    gmfn = backpointer(sp);
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
     /* Decrement refcounts of all the old entries */
@@ -2022,15 +2036,17 @@ void sh_destroy_l3_shadow(struct vcpu *v
 void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
 {
     shadow_l3e_t *sl3e;
-    u32 t = mfn_to_page(smfn)->u.sh.type;
+    struct page_info *sp = mfn_to_page(smfn);
+    u32 t = sp->u.sh.type;
     mfn_t gmfn, sl3mfn;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
                   "%s(%05lx)\n", __func__, mfn_x(smfn));
     ASSERT(t == SH_type_l3_shadow);
+    ASSERT(sp->u.sh.head);
 
     /* Record that the guest page isn't shadowed any more (in this type) */
-    gmfn = backpointer(mfn_to_page(smfn));
+    gmfn = backpointer(sp);
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
 
@@ -2052,7 +2068,8 @@ void sh_destroy_l2_shadow(struct vcpu *v
 {
     shadow_l2e_t *sl2e;
-    u32 t = mfn_to_page(smfn)->u.sh.type;
+    struct page_info *sp = mfn_to_page(smfn);
+    u32 t = sp->u.sh.type;
     mfn_t gmfn, sl2mfn;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
@@ -2063,9 +2080,10 @@ void sh_destroy_l2_shadow(struct vcpu *v
 #else
     ASSERT(t == SH_type_l2_shadow);
 #endif
+    ASSERT(sp->u.sh.head);
 
     /* Record that the guest page isn't shadowed any more (in this type) */
-    gmfn = backpointer(mfn_to_page(smfn));
+    gmfn = backpointer(sp);
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
 
@@ -2086,21 +2104,23 @@ void sh_destroy_l1_shadow(struct vcpu *v
 {
     struct domain *d = v->domain;
     shadow_l1e_t *sl1e;
-    u32 t = mfn_to_page(smfn)->u.sh.type;
+    struct page_info *sp = mfn_to_page(smfn);
+    u32 t = sp->u.sh.type;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
                   "%s(%05lx)\n", __func__, mfn_x(smfn));
     ASSERT(t == SH_type_l1_shadow || t == SH_type_fl1_shadow);
+    ASSERT(sp->u.sh.head);
 
     /* Record that the guest page isn't shadowed any more (in this type) */
     if ( t == SH_type_fl1_shadow )
     {
-        gfn_t gfn = _gfn(mfn_to_page(smfn)->v.sh.back);
+        gfn_t gfn = _gfn(sp->v.sh.back);
         delete_fl1_shadow_status(v, gfn, smfn);
     }
     else
     {
-        mfn_t gmfn = backpointer(mfn_to_page(smfn));
+        mfn_t gmfn = backpointer(sp);
         delete_shadow_status(v, gmfn, t, smfn);
         shadow_demote(v, gmfn, t);
     }
@@ -5160,6 +5180,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
     int done = 0;
 
     /* Follow the backpointer */
+    ASSERT(mfn_to_page(sl1mfn)->u.sh.head);
     gl1mfn = backpointer(mfn_to_page(sl1mfn));
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -5254,6 +5275,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
     int done = 0;
 
     /* Follow the backpointer */
+    ASSERT(mfn_to_page(sl2mfn)->u.sh.head);
     gl2mfn = backpointer(mfn_to_page(sl2mfn));
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -5303,6 +5325,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
     int done = 0;
 
     /* Follow the backpointer */
+    ASSERT(mfn_to_page(sl3mfn)->u.sh.head);
     gl3mfn = backpointer(mfn_to_page(sl3mfn));
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -5350,6 +5373,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
     int done = 0;
 
     /* Follow the backpointer */
+    ASSERT(mfn_to_page(sl4mfn)->u.sh.head);
     gl4mfn = backpointer(mfn_to_page(sl4mfn));
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
diff -r ae0cd4e5cc01 -r 14fcf5b87344 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Wed Sep 01 10:19:14 2010 +0100
+++ b/xen/arch/x86/mm/shadow/private.h  Wed Sep 01 11:23:47 2010 +0100
@@ -625,6 +625,7 @@ static inline int sh_get_ref(struct vcpu
     struct page_info *sp = mfn_to_page(smfn);
 
     ASSERT(mfn_valid(smfn));
+    ASSERT(sp->u.sh.head);
 
     x = sp->u.sh.count;
     nx = x + 1;
@@ -657,6 +658,7 @@ static inline void sh_put_ref(struct vcp
     struct page_info *sp = mfn_to_page(smfn);
 
     ASSERT(mfn_valid(smfn));
+    ASSERT(sp->u.sh.head);
     ASSERT(!(sp->count_info & PGC_count_mask));
 
     /* If this is the entry in the up-pointer, remove it */
diff -r ae0cd4e5cc01 -r 14fcf5b87344 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Wed Sep 01 10:19:14 2010 +0100
+++ b/xen/include/asm-x86/mm.h  Wed Sep 01 11:23:47 2010 +0100
@@ -63,7 +63,8 @@ struct page_info
         struct {
             unsigned long type:5;   /* What kind of shadow is this? */
             unsigned long pinned:1; /* Is the shadow pinned? */
-            unsigned long count:26; /* Reference count */
+            unsigned long head:1;   /* Is this the first page of the shadow? */
+            unsigned long count:25; /* Reference count */
         } sh;
 
         /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog