[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen master] add page_get_owner_and_reference() related ASSERT()s



commit d1f1380591e6abe7101228346158e15248cc4cb5
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Aug 13 14:47:06 2015 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Aug 13 14:47:06 2015 +0200

    add page_get_owner_and_reference() related ASSERT()s
    
    The function shouldn't return NULL after having obtained a reference,
    or else the caller won't know to drop it.
    
    Also its result shouldn't be ignored - if calling code is certain that
    a page already has a non-zero refcount, it had better ASSERT() so.
    
    Finally this as well as get_page() and put_page() are required to be
    available on all architectures - move the declarations to xen/mm.h.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
    Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
    Release-acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/arch/arm/guestcopy.c |    4 +---
 xen/arch/arm/mm.c        |    6 +++++-
 xen/arch/x86/mm.c        |    6 +++++-
 xen/common/grant_table.c |    7 ++++++-
 xen/include/asm-arm/mm.h |    4 ----
 xen/include/asm-x86/mm.h |    3 ---
 xen/include/xen/mm.h     |    6 +++++-
 7 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index 7dbaeca..ce1c3c3 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -1,10 +1,8 @@
-#include <xen/config.h>
 #include <xen/lib.h>
 #include <xen/domain_page.h>
+#include <xen/mm.h>
 #include <xen/sched.h>
 #include <asm/current.h>
-
-#include <asm/mm.h>
 #include <asm/guest_access.h>
 
 static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index ae0f34c..d6f64cc 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1170,6 +1170,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
 struct domain *page_get_owner_and_reference(struct page_info *page)
 {
     unsigned long x, y = page->count_info;
+    struct domain *owner;
 
     do {
         x = y;
@@ -1182,7 +1183,10 @@ struct domain *page_get_owner_and_reference(struct page_info *page)
     }
     while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );
 
-    return page_get_owner(page);
+    owner = page_get_owner(page);
+    ASSERT(owner);
+
+    return owner;
 }
 
 void put_page(struct page_info *page)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 4b76587..358eb3a 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2038,6 +2038,7 @@ void put_page(struct page_info *page)
 struct domain *page_get_owner_and_reference(struct page_info *page)
 {
     unsigned long x, y = page->count_info;
+    struct domain *owner;
 
     do {
         x = y;
@@ -2051,7 +2052,10 @@ struct domain *page_get_owner_and_reference(struct page_info *page)
     }
     while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );
 
-    return page_get_owner(page);
+    owner = page_get_owner(page);
+    ASSERT(owner);
+
+    return owner;
 }
 
 
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index f2ed64a..2b449d5 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -2244,7 +2244,12 @@ __acquire_grant_for_copy(
     {
         ASSERT(mfn_valid(act->frame));
         *page = mfn_to_page(act->frame);
-        (void)page_get_owner_and_reference(*page);
+        td = page_get_owner_and_reference(*page);
+        /*
+         * act->pin being non-zero should guarantee the page to have a
+         * non-zero refcount and hence a valid owner.
+         */
+        ASSERT(td);
     }
 
     act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 2e1f21a..a95082e 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -275,10 +275,6 @@ static inline void *page_to_virt(const struct page_info *pg)
     return mfn_to_virt(page_to_mfn(pg));
 }
 
-struct domain *page_get_owner_and_reference(struct page_info *page);
-void put_page(struct page_info *page);
-int  get_page(struct page_info *page, struct domain *domain);
-
 struct page_info *get_page_from_gva(struct domain *d, vaddr_t va,
                                     unsigned long flags);
 
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 8595c38..67b34c6 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -352,9 +352,6 @@ const unsigned long *get_platform_badpages(unsigned int *array_size);
 int page_lock(struct page_info *page);
 void page_unlock(struct page_info *page);
 
-struct domain *page_get_owner_and_reference(struct page_info *page);
-void put_page(struct page_info *page);
-int  get_page(struct page_info *page, struct domain *domain);
 void put_page_type(struct page_info *page);
 int  get_page_type(struct page_info *page, unsigned long type);
 int  put_page_type_preemptible(struct page_info *page);
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 876d370..5d4b64b 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -45,6 +45,7 @@
 #ifndef __XEN_MM_H__
 #define __XEN_MM_H__
 
+#include <xen/compiler.h>
 #include <xen/types.h>
 #include <xen/list.h>
 #include <xen/spinlock.h>
@@ -77,9 +78,12 @@ TYPE_SAFE(unsigned long, pfn);
 #undef pfn_t
 #endif
 
-struct domain;
 struct page_info;
 
+void put_page(struct page_info *);
+int get_page(struct page_info *, struct domain *);
+struct domain *__must_check page_get_owner_and_reference(struct page_info *);
+
 /* Boot-time allocator. Turns into generic allocator after bootstrap. */
 void init_boot_pages(paddr_t ps, paddr_t pe);
 unsigned long alloc_boot_pages(
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.