[Xen-changelog] [xen-unstable] x86/mm/p2m: Fix locking discipline around log-dirty teardown.
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1307017012 -3600
# Node ID 51f28a7cbb3d1e42e3592e5dc45041742a100fea
# Parent  c9ea54b4f49ad9363ef8f08fa984f2900dc147e0
x86/mm/p2m: Fix locking discipline around log-dirty teardown.

It's not safe to call paging_free_log_dirty_page with the log-dirty
lock held.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---

diff -r c9ea54b4f49a -r 51f28a7cbb3d xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/paging.c  Thu Jun 02 13:16:52 2011 +0100
@@ -156,10 +156,16 @@
 {
     mfn_t *l4, *l3, *l2;
     int i4, i3, i2;
+    struct page_list_head to_free;
+    struct page_info *pg, *tmp;
 
     if ( !mfn_valid(d->arch.paging.log_dirty.top) )
         return;
 
+    INIT_PAGE_LIST_HEAD(&to_free);
+
+    log_dirty_lock(d);
+
     l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
 
     for ( i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++ )
@@ -178,22 +184,28 @@
 
             for ( i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++ )
                 if ( mfn_valid(l2[i2]) )
-                    paging_free_log_dirty_page(d, l2[i2]);
+                    page_list_add_tail(mfn_to_page(l2[i2]), &to_free);
 
             unmap_domain_page(l2);
-            paging_free_log_dirty_page(d, l3[i3]);
+            page_list_add_tail(mfn_to_page(l3[i3]), &to_free);
         }
 
         unmap_domain_page(l3);
-        paging_free_log_dirty_page(d, l4[i4]);
+        page_list_add_tail(mfn_to_page(l4[i4]), &to_free);
     }
 
     unmap_domain_page(l4);
-    paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
+    page_list_add_tail(mfn_to_page(d->arch.paging.log_dirty.top), &to_free);
     d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
 
     ASSERT(d->arch.paging.log_dirty.allocs == 0);
     d->arch.paging.log_dirty.failed_allocs = 0;
+
+    log_dirty_unlock(d);
+
+    /* Return the memory now that we're not holding the log-dirty lock */
+    page_list_for_each_safe(pg, tmp, &to_free)
+        paging_free_log_dirty_page(d, page_to_mfn(pg));
 }
 
 int paging_log_dirty_enable(struct domain *d)
@@ -217,10 +229,8 @@
     domain_pause(d);
     /* Safe because the domain is paused. */
     ret = d->arch.paging.log_dirty.disable_log_dirty(d);
-    log_dirty_lock(d);
     if ( !paging_mode_log_dirty(d) )
         paging_free_log_dirty_bitmap(d);
-    log_dirty_unlock(d);
     domain_unpause(d);
 
     return ret;
@@ -672,10 +682,9 @@
 /* This function fress log dirty bitmap resources. */
 static void paging_log_dirty_teardown(struct domain*d)
 {
-    log_dirty_lock(d);
     paging_free_log_dirty_bitmap(d);
-    log_dirty_unlock(d);
 }
+
 /************************************************/
 /*           CODE FOR PAGING SUPPORT            */
 /************************************************/
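
The pattern the patch applies is worth spelling out: while the log-dirty lock
is held, teardown only unlinks the log-dirty pages onto a local list
(to_free); the actual calls to paging_free_log_dirty_page() happen after
log_dirty_unlock(d). Below is a minimal, self-contained sketch of that
"collect under the lock, free after unlocking" idea in plain C. It is not Xen
code: the struct page, tracked_pages list, free_page() helper and pthread
mutex are stand-ins chosen for illustration, on the assumption that the free
path may need to take other locks and therefore must not run with this lock
held.

/*
 * Sketch only (not Xen code): defer freeing until after the lock is
 * dropped by collecting items on a local list while the lock is held.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct page {
    struct page *next;
    int id;
};

static pthread_mutex_t log_dirty_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page *tracked_pages;      /* protected by log_dirty_lock */

/* Stand-in for paging_free_log_dirty_page(): must NOT run under the lock. */
static void free_page(struct page *pg)
{
    printf("freeing page %d\n", pg->id);
    free(pg);
}

static void teardown(void)
{
    struct page *to_free = NULL;        /* local list, needs no locking */
    struct page *pg, *tmp;

    pthread_mutex_lock(&log_dirty_lock);

    /* Unlink everything onto the local list while the lock is held. */
    while ( (pg = tracked_pages) != NULL )
    {
        tracked_pages = pg->next;
        pg->next = to_free;
        to_free = pg;
    }

    pthread_mutex_unlock(&log_dirty_lock);

    /* Return the memory now that we're not holding the lock. */
    for ( pg = to_free; pg != NULL; pg = tmp )
    {
        tmp = pg->next;
        free_page(pg);
    }
}

int main(void)
{
    for ( int i = 0; i < 3; i++ )
    {
        struct page *pg = malloc(sizeof(*pg));
        if ( pg == NULL )
            abort();
        pg->id = i;
        pg->next = tracked_pages;
        tracked_pages = pg;
    }
    teardown();
    return 0;
}

The same reshuffling is what the patch does with page_list_add_tail() under
the lock and page_list_for_each_safe() after log_dirty_unlock(d).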