[PATCH 2/8] x86/shadow: properly handle get_page() failing
We should not blindly (in a release build) insert the new entry in the
hash if a reference to the guest page cannot be obtained, or else an
excess reference would be put when removing the hash entry again. Crash
the domain in that case instead. The sole caller doesn't further care
about the state of the guest page: All it does is return the
corresponding shadow page (which was obtained successfully before) to
its caller.

To compensate we further need to adjust hash removal: Since the shadow
page has already had its backlink set, domain cleanup code would try to
destroy the shadow, and hence still cause a put_page() without a
corresponding get_page(). Leverage the fact that the failed get_page()
leads to no hash insertion, by making shadow_hash_delete() no longer
assume it will find the requested entry. Instead have it return whether
the entry was found. This way delete_shadow_status() can avoid calling
put_page() in the problem scenario.

For the other caller of shadow_hash_delete(), simply reinstate the
otherwise dropped assertion at the call site.

While touching the conditionals in {set,delete}_shadow_status() anyway,
also switch around their two pre-existing parts, to have the cheap one
first (frequently allowing evaluation of the expensive one - expensive
due to evaluate_nospec() - to be avoided altogether).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1581,7 +1581,7 @@ void shadow_hash_insert(struct domain *d
     sh_hash_audit_bucket(d, key);
 }
 
-void shadow_hash_delete(struct domain *d, unsigned long n, unsigned int t,
+bool shadow_hash_delete(struct domain *d, unsigned long n, unsigned int t,
                         mfn_t smfn)
 /* Excise the mapping (n,t)->smfn from the hash table */
 {
@@ -1606,10 +1606,8 @@ void shadow_hash_delete(struct domain *d
     {
         /* Need to search for the one we want */
         x = d->arch.paging.shadow.hash_table[key];
-        while ( 1 )
+        while ( x )
         {
-            ASSERT(x); /* We can't have hit the end, since our target is
-                        * still in the chain somehwere... */
             if ( next_shadow(x) == sp )
             {
                 x->next_shadow = sp->next_shadow;
@@ -1617,10 +1615,14 @@ void shadow_hash_delete(struct domain *d
             }
             x = next_shadow(x);
         }
+        if ( !x )
+            return false;
     }
     set_next_shadow(sp, NULL);
 
     sh_hash_audit_bucket(d, key);
+
+    return true;
 }
 
 typedef int (*hash_vcpu_callback_t)(struct vcpu *v, mfn_t smfn, mfn_t other_mfn);
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -132,7 +132,8 @@ delete_fl1_shadow_status(struct domain *
     SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%"PRI_mfn"\n",
                   gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
     ASSERT(mfn_to_page(smfn)->u.sh.head);
-    shadow_hash_delete(d, gfn_x(gfn), SH_type_fl1_shadow, smfn);
+    if ( !shadow_hash_delete(d, gfn_x(gfn), SH_type_fl1_shadow, smfn) )
+        ASSERT_UNREACHABLE();
 }
 
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -375,7 +375,7 @@ shadow_size(unsigned int shadow_type)
 mfn_t shadow_hash_lookup(struct domain *d, unsigned long n, unsigned int t);
 void  shadow_hash_insert(struct domain *d,
                          unsigned long n, unsigned int t, mfn_t smfn);
-void  shadow_hash_delete(struct domain *d,
+bool  shadow_hash_delete(struct domain *d,
                          unsigned long n, unsigned int t, mfn_t smfn);
 
 /* shadow promotion */
@@ -773,18 +773,19 @@ static inline void
 set_shadow_status(struct domain *d, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
 /* Put a shadow into the hash table */
 {
-    int res;
-
     SHADOW_PRINTK("d%d gmfn=%lx, type=%08x, smfn=%lx\n",
                   d->domain_id, mfn_x(gmfn), shadow_type, mfn_x(smfn));
 
     ASSERT(mfn_to_page(smfn)->u.sh.head);
 
     /* 32-bit PV guests don't own their l4 pages so can't get_page them */
-    if ( !is_pv_32bit_domain(d) || shadow_type != SH_type_l4_64_shadow )
+    if ( (shadow_type != SH_type_l4_64_shadow || !is_pv_32bit_domain(d)) &&
+         !get_page(mfn_to_page(gmfn), d) )
     {
-        res = get_page(mfn_to_page(gmfn), d);
-        ASSERT(res == 1);
+        printk(XENLOG_G_ERR "%pd: cannot get page for MFN %" PRI_mfn "\n",
+               d, mfn_x(gmfn));
+        domain_crash(d);
+        return;
     }
 
     shadow_hash_insert(d, mfn_x(gmfn), shadow_type, smfn);
@@ -797,9 +798,9 @@ delete_shadow_status(struct domain *d, m
     SHADOW_PRINTK("d%d gmfn=%"PRI_mfn", type=%08x, smfn=%"PRI_mfn"\n",
                   d->domain_id, mfn_x(gmfn), shadow_type, mfn_x(smfn));
     ASSERT(mfn_to_page(smfn)->u.sh.head);
-    shadow_hash_delete(d, mfn_x(gmfn), shadow_type, smfn);
-    /* 32-bit PV guests don't own their l4 pages; see set_shadow_status */
-    if ( !is_pv_32bit_domain(d) || shadow_type != SH_type_l4_64_shadow )
+    if ( shadow_hash_delete(d, mfn_x(gmfn), shadow_type, smfn) &&
+         /* 32-bit PV guests don't own their l4 pages; see set_shadow_status */
+         (shadow_type != SH_type_l4_64_shadow || !is_pv_32bit_domain(d)) )
         put_page(mfn_to_page(gmfn));
 }
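
To illustrate the reference-counting hazard the description opens with,
here is a minimal standalone sketch. It is illustrative only, not Xen
code: toy_page, toy_get_page() and toy_put_page() are hypothetical
stand-ins for struct page_info, get_page() and put_page().

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for struct page_info and get_page()/put_page(). */
struct toy_page {
    unsigned int count;
    bool dying;        /* lets acquisition fail, as for a dying domain */
};

static bool toy_get_page(struct toy_page *pg)
{
    if ( pg->dying )
        return false;  /* reference cannot be taken */
    ++pg->count;
    return true;
}

static void toy_put_page(struct toy_page *pg)
{
    --pg->count;
}

int main(void)
{
    struct toy_page pg = { .count = 1, .dying = true };

    /* Pre-patch shape: the failure is ignored and the hash entry is
     * created regardless. */
    (void)toy_get_page(&pg);
    bool entry_present = true;

    /* Removal then pairs a put with every entry it finds ... */
    if ( entry_present )
        toy_put_page(&pg);

    /* ... underflowing the count: no get succeeded, one put happened. */
    printf("count = %u, expected 1\n", pg.count);
    return 0;
}

With the patch applied, neither branch of that imbalance can occur: a
failed get means no hash entry, and a missing hash entry means no put.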
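The final paragraph's point about operand ordering rests on C's
short-circuit evaluation of ||. A small sketch of the effect, with
cheap() and expensive() as hypothetical stand-ins for the shadow-type
comparison and for is_pv_32bit_domain(), whose evaluate_nospec() use
makes it the costly operand:

#include <stdbool.h>
#include <stdio.h>

static bool cheap(unsigned int type)
{
    return type != 4;  /* plain integer compare, like
                        * shadow_type != SH_type_l4_64_shadow */
}

static bool expensive(void)
{
    /* In Xen this is where evaluate_nospec() would insert a
     * speculation barrier, making the call comparatively costly. */
    puts("expensive predicate evaluated");
    return false;
}

int main(void)
{
    /* With the cheap operand first, || short-circuits: expensive()
     * only ever runs for type 4. */
    for ( unsigned int type = 3; type < 6; ++type )
        if ( cheap(type) || !expensive() )
            printf("type %u: reference would be taken\n", type);
    return 0;
}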