[Xen-changelog] [xen-unstable] x86/mm: move mfn_is_dirty along with the rest of the log-dirty code
# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1292415117 0
# Node ID 6ed80a93a5e031c5ffa13dee7d6dcecbbab42b67
# Parent  d8279118b4bbb3bb3689b9c20abb25d0c09e2b69
x86/mm: move mfn_is_dirty along with the rest of the log-dirty code

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
 xen/arch/x86/mm/paging.c         |   56 +++++++++++++++++++++++++++++++++++
 xen/arch/x86/mm/shadow/multi.c   |    2 -
 xen/arch/x86/mm/shadow/private.h |   61 ---------------------------------------
 xen/include/asm-x86/paging.h     |    3 +
 4 files changed, 60 insertions(+), 62 deletions(-)

diff -r d8279118b4bb -r 6ed80a93a5e0 xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c	Wed Dec 15 12:10:31 2010 +0000
+++ b/xen/arch/x86/mm/paging.c	Wed Dec 15 12:11:57 2010 +0000
@@ -303,6 +303,62 @@ void paging_mark_dirty(struct domain *d,
  out:
     log_dirty_unlock(d);
 }
+
+
+/* Is this guest page dirty? */
+int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
+{
+    unsigned long pfn;
+    mfn_t mfn, *l4, *l3, *l2;
+    unsigned long *l1;
+    int rv;
+
+    ASSERT(paging_mode_log_dirty(d));
+
+    /* We /really/ mean PFN here, even for non-translated guests. */
+    pfn = get_gpfn_from_mfn(mfn_x(gmfn));
+    /* Page sharing not supported for shadow domains */
+    BUG_ON(SHARED_M2P(pfn));
+    if ( unlikely(!VALID_M2P(pfn)) )
+        return 0;
+
+    if ( d->arch.paging.log_dirty.failed_allocs > 0 )
+        /* If we have any failed allocations our dirty log is bogus.
+         * Since we can't signal an error here, be conservative and
+         * report "dirty" in this case. (The only current caller,
+         * _sh_propagate, leaves known-dirty pages writable, preventing
+         * subsequent dirty-logging faults from them.)
+         */
+        return 1;
+
+    l4 = paging_map_log_dirty_bitmap(d);
+    if ( !l4 )
+        return 0;
+
+    mfn = l4[L4_LOGDIRTY_IDX(pfn)];
+    unmap_domain_page(l4);
+    if ( !mfn_valid(mfn) )
+        return 0;
+
+    l3 = map_domain_page(mfn_x(mfn));
+    mfn = l3[L3_LOGDIRTY_IDX(pfn)];
+    unmap_domain_page(l3);
+    if ( !mfn_valid(mfn) )
+        return 0;
+
+    l2 = map_domain_page(mfn_x(mfn));
+    mfn = l2[L2_LOGDIRTY_IDX(pfn)];
+    unmap_domain_page(l2);
+    if ( !mfn_valid(mfn) )
+        return 0;
+
+    l1 = map_domain_page(mfn_x(mfn));
+    rv = test_bit(L1_LOGDIRTY_IDX(pfn), l1);
+    unmap_domain_page(l1);
+
+    return rv;
+}
+
 /* Read a domain's log-dirty bitmap and stats.  If the operation is a CLEAN,
  * clear the bitmap and stats as well. */
diff -r d8279118b4bb -r 6ed80a93a5e0 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c	Wed Dec 15 12:10:31 2010 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c	Wed Dec 15 12:11:57 2010 +0000
@@ -657,7 +657,7 @@ _sh_propagate(struct vcpu *v,
         if ( mfn_valid(target_mfn) ) {
             if ( ft & FETCH_TYPE_WRITE )
                 paging_mark_dirty(d, mfn_x(target_mfn));
-            else if ( !sh_mfn_is_dirty(d, target_mfn) )
+            else if ( !paging_mfn_is_dirty(d, target_mfn) )
                 sflags &= ~_PAGE_RW;
         }
     }
diff -r d8279118b4bb -r 6ed80a93a5e0 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h	Wed Dec 15 12:10:31 2010 +0000
+++ b/xen/arch/x86/mm/shadow/private.h	Wed Dec 15 12:11:57 2010 +0000
@@ -568,67 +568,6 @@ sh_unmap_domain_page_global(void *p)
     unmap_domain_page_global(p);
 }
 
-/******************************************************************************
- * Log-dirty mode bitmap handling
- */
-
-extern void sh_mark_dirty(struct domain *d, mfn_t gmfn);
-
-static inline int
-sh_mfn_is_dirty(struct domain *d, mfn_t gmfn)
-/* Is this guest page dirty?  Call only in log-dirty mode. */
-{
-    unsigned long pfn;
-    mfn_t mfn, *l4, *l3, *l2;
-    unsigned long *l1;
-    int rv;
-
-    ASSERT(shadow_mode_log_dirty(d));
-
-    /* We /really/ mean PFN here, even for non-translated guests. */
-    pfn = get_gpfn_from_mfn(mfn_x(gmfn));
-    /* Page sharing not supported for shadow domains */
-    BUG_ON(SHARED_M2P(pfn));
-    if ( unlikely(!VALID_M2P(pfn)) )
-        return 0;
-
-    if ( d->arch.paging.log_dirty.failed_allocs > 0 )
-        /* If we have any failed allocations our dirty log is bogus.
-         * Since we can't signal an error here, be conservative and
-         * report "dirty" in this case. (The only current caller,
-         * _sh_propagate, leaves known-dirty pages writable, preventing
-         * subsequent dirty-logging faults from them.)
-         */
-        return 1;
-
-    l4 = paging_map_log_dirty_bitmap(d);
-    if ( !l4 )
-        return 0;
-
-    mfn = l4[L4_LOGDIRTY_IDX(pfn)];
-    unmap_domain_page(l4);
-    if ( !mfn_valid(mfn) )
-        return 0;
-
-    l3 = map_domain_page(mfn_x(mfn));
-    mfn = l3[L3_LOGDIRTY_IDX(pfn)];
-    unmap_domain_page(l3);
-    if ( !mfn_valid(mfn) )
-        return 0;
-
-    l2 = map_domain_page(mfn_x(mfn));
-    mfn = l2[L2_LOGDIRTY_IDX(pfn)];
-    unmap_domain_page(l2);
-    if ( !mfn_valid(mfn) )
-        return 0;
-
-    l1 = map_domain_page(mfn_x(mfn));
-    rv = test_bit(L1_LOGDIRTY_IDX(pfn), l1);
-    unmap_domain_page(l1);
-
-    return rv;
-}
-
 /**************************************************************************/
 /* Shadow-page refcounting. */
diff -r d8279118b4bb -r 6ed80a93a5e0 xen/include/asm-x86/paging.h
--- a/xen/include/asm-x86/paging.h	Wed Dec 15 12:10:31 2010 +0000
+++ b/xen/include/asm-x86/paging.h	Wed Dec 15 12:11:57 2010 +0000
@@ -161,6 +161,9 @@ void paging_log_dirty_init(struct domain
 /* mark a page as dirty */
 void paging_mark_dirty(struct domain *d, unsigned long guest_mfn);
 
+/* is this guest page dirty? */
+int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn);
+
 /*
  * Log-dirty radix tree indexing:
  *   All tree nodes are PAGE_SIZE bytes, mapped on-demand.
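For readers new to this code: the walk in paging_mfn_is_dirty() above descends a four-level tree of PAGE_SIZE nodes whose leaves make up the dirty bitmap (see the "Log-dirty radix tree indexing" comment in xen/include/asm-x86/paging.h). The stand-alone C sketch below mimics that lookup in ordinary user-space memory under assumed parameters: NODE_ENTRIES, LEAF_BITS, the L1_IDX..L4_IDX macros and all struct/function names here are illustrative inventions, not Xen's actual LOGDIRTY definitions, and calloc'd nodes stand in for domain-heap pages.

/* Illustrative sketch only: a user-space model of a four-level log-dirty
 * tree walk shaped like paging_mfn_is_dirty().  The node sizes and index
 * splits below are assumptions, not Xen's real LOGDIRTY_IDX macros. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NODE_ENTRIES 512            /* assumed entries per interior node    */
#define LEAF_BITS    (4096 * 8)     /* assumed bits per page-sized leaf     */

/* Assumed index split: leaf bits first, then one node's worth per level. */
#define L1_IDX(pfn) ((pfn) % LEAF_BITS)
#define L2_IDX(pfn) (((pfn) / LEAF_BITS) % NODE_ENTRIES)
#define L3_IDX(pfn) (((pfn) / LEAF_BITS / NODE_ENTRIES) % NODE_ENTRIES)
#define L4_IDX(pfn) (((pfn) / LEAF_BITS / NODE_ENTRIES / NODE_ENTRIES) % NODE_ENTRIES)

struct leaf   { uint8_t bits[LEAF_BITS / 8]; };
struct l2node { struct leaf   *e[NODE_ENTRIES]; };
struct l3node { struct l2node *e[NODE_ENTRIES]; };
struct l4node { struct l3node *e[NODE_ENTRIES]; };

/* Walk the tree; a missing node at any level reads as "clean", just as a
 * non-valid MFN does in paging_mfn_is_dirty(). */
static int pfn_is_dirty(const struct l4node *l4, unsigned long pfn)
{
    const struct l3node *l3;
    const struct l2node *l2;
    const struct leaf *l1;

    if ( !l4 || !(l3 = l4->e[L4_IDX(pfn)]) )
        return 0;
    if ( !(l2 = l3->e[L3_IDX(pfn)]) )
        return 0;
    if ( !(l1 = l2->e[L2_IDX(pfn)]) )
        return 0;
    return (l1->bits[L1_IDX(pfn) / 8] >> (L1_IDX(pfn) % 8)) & 1;
}

/* Mark a pfn dirty, growing the tree on demand (no error handling in this
 * sketch; the hypervisor populates the tree from domain-heap pages). */
static void mark_dirty(struct l4node *l4, unsigned long pfn)
{
    struct l3node **l3 = &l4->e[L4_IDX(pfn)];
    if ( !*l3 ) *l3 = calloc(1, sizeof(**l3));
    struct l2node **l2 = &(*l3)->e[L3_IDX(pfn)];
    if ( !*l2 ) *l2 = calloc(1, sizeof(**l2));
    struct leaf **l1 = &(*l2)->e[L2_IDX(pfn)];
    if ( !*l1 ) *l1 = calloc(1, sizeof(**l1));
    (*l1)->bits[L1_IDX(pfn) / 8] |= 1u << (L1_IDX(pfn) % 8);
}

int main(void)
{
    struct l4node root = { { 0 } };
    mark_dirty(&root, 0x12345);
    printf("0x12345 dirty? %d\n", pfn_is_dirty(&root, 0x12345)); /* prints 1 */
    printf("0x12346 dirty? %d\n", pfn_is_dirty(&root, 0x12346)); /* prints 0 */
    return 0;
}

As in the hypervisor code, an absent node at any level means no page in that range has ever been marked dirty, which is why paging_mfn_is_dirty() can simply return 0 when an intermediate MFN is not valid.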