[Xen-devel] [PATCH 4 of 9] x86/mm: Check how many mfns are shared, in addition to how many are saved
 xen/arch/x86/mm.c                 |   6 ------
 xen/arch/x86/mm/mem_sharing.c     |  29 +++++++++++++++++++++++++++++
 xen/arch/x86/x86_64/compat/mm.c   |   6 ++++++
 xen/arch/x86/x86_64/mm.c          |   7 +++++++
 xen/include/asm-x86/mem_sharing.h |   1 +
 xen/include/public/memory.h       |   1 +
 6 files changed, 44 insertions(+), 6 deletions(-)

This patch also moves the existing sharing-related memory op to the correct
location, and adds logic to the audit() method that uses the new information.

This patch only provides the Xen implementation of the new memory op.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxx>
Signed-off-by: Adin Scannell <adin@xxxxxxxxxxx>

diff -r 4a189125d71e -r dbfabd1bbfb1 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -119,7 +119,6 @@
 #include <xen/trace.h>
 #include <asm/setup.h>
 #include <asm/fixmap.h>
-#include <asm/mem_sharing.h>
 
 /*
  * Mapping of first 2 or 4 megabytes of memory. This is mapped with 4kB
@@ -5093,11 +5092,6 @@ long arch_memory_op(int op, XEN_GUEST_HA
         return rc;
     }
 
-#ifdef __x86_64__
-    case XENMEM_get_sharing_freed_pages:
-        return mem_sharing_get_nr_saved_mfns();
-#endif
-
     default:
         return subarch_memory_op(op, arg);
     }
diff -r 4a189125d71e -r dbfabd1bbfb1 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -72,6 +72,7 @@ static inline int mem_sharing_audit(void
 
 static shr_handle_t next_handle = 1;
 static atomic_t nr_saved_mfns = ATOMIC_INIT(0);
+static atomic_t nr_shared_mfns = ATOMIC_INIT(0);
 
 typedef struct gfn_info
 {
@@ -150,9 +151,12 @@ static struct page_info* mem_sharing_loo
 static int mem_sharing_audit(void)
 {
     int errors = 0;
+    unsigned long count_expected;
+    unsigned long count_found = 0;
     struct list_head *ae;
 
     ASSERT(shr_locked_by_me());
+    count_expected = atomic_read(&nr_shared_mfns);
 
     list_for_each(ae, &shr_audit_list)
     {
@@ -200,6 +204,10 @@ static int mem_sharing_audit(void)
             continue;
         }
 
+        /* Only increase our expected count for pages that are actually shared */
+        if ( !list_has_one_entry(&pg->shared_info->gfns) )
+            count_found++;
+
         /* Check if all GFNs map to the MFN, and the p2m types */
         list_for_each(le, &pg->shared_info->gfns)
         {
@@ -245,6 +253,13 @@ static int mem_sharing_audit(void)
         }
     }
 
+    if ( count_found != count_expected )
+    {
+        MEM_SHARING_DEBUG("Expected %ld shared mfns, found %ld.",
+                          count_expected, count_found);
+        errors++;
+    }
+
     return errors;
 }
 #endif
@@ -294,6 +309,11 @@ unsigned int mem_sharing_get_nr_saved_mf
     return ((unsigned int)atomic_read(&nr_saved_mfns));
 }
 
+unsigned int mem_sharing_get_nr_shared_mfns(void)
+{
+    return (unsigned int)atomic_read(&nr_shared_mfns);
+}
+
 int mem_sharing_sharing_resume(struct domain *d)
 {
     mem_event_response_t rsp;
@@ -577,6 +597,14 @@ int mem_sharing_share_pages(struct domai
     ASSERT(list_empty(&cpage->shared_info->gfns));
     if ( single_source_gfn )
         atomic_inc(&sd->shr_pages);
+    /* We only increase the number of shared pages when
+     * sharing for the first time */
+    if ( single_source_gfn && single_client_gfn )
+        atomic_inc(&nr_shared_mfns);
+    /* And we decrease it when re-sharing already shared
+     * (because one of them becomes a "saved" page). */
+    if ( !single_source_gfn && !single_client_gfn )
+        atomic_dec(&nr_shared_mfns);
 
     /* Clear the rest of the shared state */
     audit_del_list(cpage);
@@ -659,6 +687,7 @@ gfn_found:
             BUG_ON(!d);
             atomic_dec(&d->shr_pages);
             put_domain(d);
+            atomic_dec(&nr_shared_mfns);
         }
     } else {
         /* Clean up shared state */
diff -r 4a189125d71e -r dbfabd1bbfb1 xen/arch/x86/x86_64/compat/mm.c
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -205,6 +205,12 @@ int compat_arch_memory_op(int op, XEN_GU
         break;
     }
 
+    case XENMEM_get_sharing_freed_pages:
+        return mem_sharing_get_nr_saved_mfns();
+
+    case XENMEM_get_sharing_shared_pages:
+        return mem_sharing_get_nr_shared_mfns();
+
     default:
         rc = -ENOSYS;
         break;
diff -r 4a189125d71e -r dbfabd1bbfb1 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -34,6 +34,7 @@
 #include <asm/msr.h>
 #include <asm/setup.h>
 #include <asm/numa.h>
+#include <asm/mem_sharing.h>
 #include <public/memory.h>
 
 /* Parameters for PFN/MADDR compression. */
@@ -1090,6 +1091,12 @@ long subarch_memory_op(int op, XEN_GUEST
         break;
 
+    case XENMEM_get_sharing_freed_pages:
+        return mem_sharing_get_nr_saved_mfns();
+
+    case XENMEM_get_sharing_shared_pages:
+        return mem_sharing_get_nr_shared_mfns();
+
     default:
         rc = -ENOSYS;
         break;
diff -r 4a189125d71e -r dbfabd1bbfb1 xen/include/asm-x86/mem_sharing.h
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -45,6 +45,7 @@ struct page_sharing_info
     (is_hvm_domain(_d) && paging_mode_hap(_d))
 
 unsigned int mem_sharing_get_nr_saved_mfns(void);
+unsigned int mem_sharing_get_nr_shared_mfns(void);
 int mem_sharing_nominate_page(struct domain *d,
                               unsigned long gfn,
                               int expected_refcnt,
diff -r 4a189125d71e -r dbfabd1bbfb1 xen/include/public/memory.h
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -294,6 +294,7 @@ typedef struct xen_pod_target xen_pod_ta
  * The call never fails.
 */
 #define XENMEM_get_sharing_freed_pages    18
+#define XENMEM_get_sharing_shared_pages   19
 
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
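For reference, the accounting rule introduced in the mem_sharing_share_pages() hunk above can be summarised as a pure function of the two "does this mfn currently back a single gfn?" flags. This is an illustrative sketch only, not code from the patch, and the helper name shared_mfns_delta() is made up:

/* Illustrative sketch, not part of the patch: models how nr_shared_mfns
 * changes when mem_sharing_share_pages() merges a client page into a
 * source page.  "single" means the mfn currently backs exactly one gfn. */
static int shared_mfns_delta(int single_source_gfn, int single_client_gfn)
{
    if ( single_source_gfn && single_client_gfn )
        return +1;  /* first-time share: one mfn newly backs two gfns */
    if ( !single_source_gfn && !single_client_gfn )
        return -1;  /* two already-shared mfns collapse into one */
    return 0;       /* mixed case: the set of shared mfns is unchanged */
}

This is the same invariant the audit() change verifies: nr_shared_mfns should equal the number of mfns whose gfn list has more than one entry.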
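Below is a hedged sketch of how a consumer outside the hypervisor might read the two counters once toolstack plumbing exists. It assumes a Linux-guest kernel context where the usual HYPERVISOR_memory_op() wrapper is available; the report_sharing_stats() helper is hypothetical and not part of this series, and the op numbers are taken from the public/memory.h hunk above.

/* Hypothetical consumer sketch, not part of this patch.  Assumes a Linux
 * guest kernel context; both ops ignore their argument and never fail. */
#include <linux/printk.h>
#include <asm/xen/hypercall.h>   /* HYPERVISOR_memory_op() */

#ifndef XENMEM_get_sharing_freed_pages
#define XENMEM_get_sharing_freed_pages  18   /* value from public/memory.h */
#endif
#ifndef XENMEM_get_sharing_shared_pages
#define XENMEM_get_sharing_shared_pages 19   /* added by this patch */
#endif

static void report_sharing_stats(void)
{
    unsigned int freed  = HYPERVISOR_memory_op(XENMEM_get_sharing_freed_pages, NULL);
    unsigned int shared = HYPERVISOR_memory_op(XENMEM_get_sharing_shared_pages, NULL);

    /* "freed" counts frames handed back to Xen by sharing (saved mfns);
     * "shared" counts mfns that currently back more than one gfn. */
    printk("mem_sharing: %u pages freed, %u mfns shared\n", freed, shared);
}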