[Xen-devel] [PATCH 07/16] xenpaging: populate only paged-out pages
Populate a paged-out page only once to reduce pressure in the ring buffer.
Several cpus may still request a page at once; xenpaging can handle this.
But: maybe this will miss pages in the paging-out state?

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

---
 xen/arch/x86/hvm/emulate.c       |    3 ++-
 xen/arch/x86/hvm/hvm.c           |   15 ++++++++++-----
 xen/arch/x86/mm/guest_walk.c     |    3 ++-
 xen/arch/x86/mm/hap/guest_walk.c |    6 ++++--
 4 files changed, 18 insertions(+), 9 deletions(-)

--- xen-unstable.hg-4.1.22344.orig/xen/arch/x86/hvm/emulate.c
+++ xen-unstable.hg-4.1.22344/xen/arch/x86/hvm/emulate.c
@@ -66,7 +66,8 @@ static int hvmemul_do_io(
     ram_mfn = gfn_to_mfn_unshare(p2m, ram_gfn, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, ram_gfn);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(p2m, ram_gfn);
         return X86EMUL_RETRY;
     }
     if ( p2m_is_shared(p2mt) )
--- xen-unstable.hg-4.1.22344.orig/xen/arch/x86/hvm/hvm.c
+++ xen-unstable.hg-4.1.22344/xen/arch/x86/hvm/hvm.c
@@ -345,7 +345,8 @@ static int hvm_set_ioreq_page(
         return -EINVAL;
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gmfn);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(p2m, gmfn);
         return -ENOENT;
     }
     if ( p2m_is_shared(p2mt) )
@@ -1369,7 +1370,8 @@ static void *__hvm_map_guest_frame(unsig
         return NULL;
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gfn);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(p2m, gfn);
         return NULL;
     }

@@ -1811,7 +1813,8 @@ static enum hvm_copy_result __hvm_copy(

         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn);
+            if ( p2m_is_paged(p2mt) )
+                p2m_mem_paging_populate(p2m, gfn);
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
@@ -3118,7 +3121,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
             mfn_t mfn = gfn_to_mfn(p2m, pfn, &t);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(p2m, pfn);
+                if ( p2m_is_paged(t) )
+                    p2m_mem_paging_populate(p2m, pfn);

                 rc = -EINVAL;
                 goto param_fail3;
@@ -3184,7 +3188,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
             mfn = gfn_to_mfn_unshare(p2m, pfn, &t, 0);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(p2m, pfn);
+                if ( p2m_is_paged(t) )
+                    p2m_mem_paging_populate(p2m, pfn);

                 rc = -EINVAL;
                 goto param_fail4;
--- xen-unstable.hg-4.1.22344.orig/xen/arch/x86/mm/guest_walk.c
+++ xen-unstable.hg-4.1.22344/xen/arch/x86/mm/guest_walk.c
@@ -96,7 +96,8 @@ static inline void *map_domain_gfn(struc
     *mfn = gfn_to_mfn_unshare(p2m, gfn_x(gfn), p2mt, 0);
     if ( p2m_is_paging(*p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gfn_x(gfn));
+        if ( p2m_is_paged(*p2mt) )
+            p2m_mem_paging_populate(p2m, gfn_x(gfn));

         *rc = _PAGE_PAGED;
         return NULL;
--- xen-unstable.hg-4.1.22344.orig/xen/arch/x86/mm/hap/guest_walk.c
+++ xen-unstable.hg-4.1.22344/xen/arch/x86/mm/hap/guest_walk.c
@@ -50,7 +50,8 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
    top_mfn = gfn_to_mfn_unshare(p2m, cr3 >> PAGE_SHIFT, &p2mt, 0);
    if ( p2m_is_paging(p2mt) )
    {
-        p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT);

        pfec[0] = PFEC_page_paged;
        return INVALID_GFN;
@@ -82,7 +83,8 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
         gfn_to_mfn_unshare(p2m, gfn_x(gfn), &p2mt, 0);
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn_x(gfn));
+            if ( p2m_is_paged(p2mt) )
+                p2m_mem_paging_populate(p2m, gfn_x(gfn));

             pfec[0] = PFEC_page_paged;
             return INVALID_GFN;
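The patch hinges on the difference between p2m_is_paging() and
p2m_is_paged(): the former matches any paging-related p2m type, including
pages whose eviction or restore is still in flight, while the latter
matches only pages that have been fully paged out. Below is a minimal,
self-contained sketch of that gating logic; the enum is a simplified
stand-in for Xen's p2m_type_t and the two helper functions only
approximate the real p2m_is_paging()/p2m_is_paged() predicates, so treat
the names and state set as illustrative assumptions rather than the
actual hypervisor definitions.

#include <stdio.h>

/* Simplified stand-in for Xen's p2m_type_t paging states. */
typedef enum {
    p2m_ram_rw,           /* normal RAM, not involved in paging */
    p2m_ram_paging_out,   /* eviction to the pager is in progress */
    p2m_ram_paged,        /* page content is fully evicted */
    p2m_ram_paging_in,    /* page is being brought back in */
} p2m_type_t;

/* Approximations of the real predicates: p2m_is_paging() covers every
 * paging-related state, p2m_is_paged() only the fully evicted one. */
static int p2m_is_paging(p2m_type_t t)
{
    return t >= p2m_ram_paging_out && t <= p2m_ram_paging_in;
}

static int p2m_is_paged(p2m_type_t t)
{
    return t == p2m_ram_paged;
}

int main(void)
{
    static const char *name[] = {
        "ram_rw", "paging_out", "paged", "paging_in"
    };
    p2m_type_t t;

    for ( t = p2m_ram_rw; t <= p2m_ram_paging_in; t++ )
    {
        /* The patched callers still bail out and retry for any paging
         * state, but only send a populate request for "paged". */
        int retry = p2m_is_paging(t);
        int populate = retry && p2m_is_paged(t);
        printf("%-10s retry=%d populate-request=%d\n",
               name[t], retry, populate);
    }
    return 0;
}

Before the patch, every paging state produced a populate request, so
several vcpus faulting on a page already in transit would queue duplicate
events in the ring buffer. With the gate, only the "paged" state does;
the paging_out row in the sketch's output (retry=1, populate-request=0)
is exactly the case the commit message flags as a possible concern.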