[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] x86: use paging_mark_pfn_dirty()
commit c5e53aa67a76c0365e754a847429b838509d9144 Author: Jan Beulich <jbeulich@xxxxxxxx> AuthorDate: Tue Feb 13 17:29:50 2018 +0100 Commit: Jan Beulich <jbeulich@xxxxxxxx> CommitDate: Tue Feb 13 17:29:50 2018 +0100 x86: use paging_mark_pfn_dirty() ... in preference over paging_mark_dirty(), when the PFN is known anyway. Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx> Acked-by: Tim Deegan <tim@xxxxxxx> Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx> Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx> --- xen/arch/x86/hvm/dm.c | 6 ++---- xen/arch/x86/hvm/hvm.c | 6 +++--- xen/arch/x86/hvm/ioreq.c | 2 +- xen/arch/x86/mm.c | 3 +-- xen/arch/x86/mm/p2m-pod.c | 2 +- 5 files changed, 8 insertions(+), 11 deletions(-) diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c index 8083ded..a96d5eb 100644 --- a/xen/arch/x86/hvm/dm.c +++ b/xen/arch/x86/hvm/dm.c @@ -220,14 +220,12 @@ static int modified_memory(struct domain *d, page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE); if ( page ) { - mfn_t gmfn = _mfn(page_to_mfn(page)); - - paging_mark_dirty(d, gmfn); + paging_mark_pfn_dirty(d, _pfn(pfn)); /* * These are most probably not page tables any more * don't take a long time and don't die either. */ - sh_remove_shadows(d, gmfn, 1, 0); + sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0); put_page(page); } } diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 18d721d..91bc3e8 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -1897,7 +1897,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, */ if ( npfec.write_access ) { - paging_mark_dirty(currd, mfn); + paging_mark_pfn_dirty(currd, _pfn(gfn)); /* * If p2m is really an altp2m, unlock here to avoid lock ordering * violation when the change below is propagated from host p2m. 
@@ -2582,7 +2582,7 @@ static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent, if ( unlikely(p2m_is_discard_write(p2mt)) ) *writable = 0; else if ( !permanent ) - paging_mark_dirty(d, _mfn(page_to_mfn(page))); + paging_mark_pfn_dirty(d, _pfn(gfn)); } if ( !permanent ) @@ -3245,7 +3245,7 @@ static enum hvm_translation_result __hvm_copy( memcpy(p, buf, count); else memset(p, 0, count); - paging_mark_dirty(v->domain, _mfn(page_to_mfn(page))); + paging_mark_pfn_dirty(v->domain, _pfn(gfn_x(gfn))); } } else diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c index 5aeaaac..7e66965 100644 --- a/xen/arch/x86/hvm/ioreq.c +++ b/xen/arch/x86/hvm/ioreq.c @@ -283,7 +283,7 @@ static int hvm_add_ioreq_gfn( rc = guest_physmap_add_page(d, _gfn(iorp->gfn), _mfn(page_to_mfn(iorp->page)), 0); if ( rc == 0 ) - paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page))); + paging_mark_pfn_dirty(d, _pfn(iorp->gfn)); return rc; } diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index 86942c7..e1f089b 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -3775,8 +3775,7 @@ long do_mmu_update( } set_gpfn_from_mfn(mfn, gpfn); - - paging_mark_dirty(pg_owner, _mfn(mfn)); + paging_mark_pfn_dirty(pg_owner, _pfn(gpfn)); put_page(page); break; diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c index b581421..fa13e07 100644 --- a/xen/arch/x86/mm/p2m-pod.c +++ b/xen/arch/x86/mm/p2m-pod.c @@ -1215,7 +1215,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, for( i = 0; i < (1UL << order); i++ ) { set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_x(gfn_aligned) + i); - paging_mark_dirty(d, mfn_add(mfn, i)); + paging_mark_pfn_dirty(d, _pfn(gfn_x(gfn_aligned) + i)); } p2m->pod.entry_count -= (1UL << order); -- generated by git-patchbot for /home/xen/git/xen.git#master _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/xen-changelog
Lists.xenproject.org is hosted with RackSpace, monitoring our [footer truncated in archive]