[Xen-devel] [PATCH 04 of 12] x86/mm: Enforce lock ordering for sharing page locks
xen/arch/x86/mm/mem_sharing.c | 91 ++++++++++++++++++++++++++----------------
xen/arch/x86/mm/mm-locks.h | 18 ++++++++-
xen/include/asm-x86/mm.h | 3 +-
3 files changed, 76 insertions(+), 36 deletions(-)
Use the ordering constructs in mm-locks.h to enforce an ordering
between the p2m lock and the page locks in the sharing code. This
applies either to the global sharing lock (in audit mode) or to the
per-page locks.
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Signed-off-by: Adin Scannell <adin@xxxxxxxxxxx>
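
For reference, the calling convention this patch introduces looks
roughly like the following. This is an illustrative sketch (the caller
and its body are hypothetical, not code from the patch); the point is
that a pg_lock_data_t travels with each lock/unlock pair:

    static void example_caller(mfn_t mfn)
    {
        struct page_info *pg;
        /* Stack-allocated bookkeeping for the ordering checks. */
        DECLARE_PG_LOCK_DATA(pld);

        /* Takes page_lock() and records the pre-lock level in pld. */
        pg = __grab_shared_page(mfn, &pld);
        if ( pg == NULL )
            return;

        /* The p2m lock may be taken here, i.e. after the page lock,
         * which is exactly the order being enforced. */

        /* Restores the recorded level and drops page_lock(). */
        mem_sharing_page_unlock(pg, &pld);
    }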
diff -r 11916fe20dd2 -r c906c616d5ac xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -37,6 +37,14 @@
static shr_handle_t next_handle = 1;
+typedef struct pg_lock_data {
+ int mm_unlock_level;
+ unsigned short recurse_count;
+} pg_lock_data_t;
+
+#define DECLARE_PG_LOCK_DATA(name) \
+ pg_lock_data_t name = { 0, 0 };
+
#if MEM_SHARING_AUDIT
static mm_lock_t shr_lock;
@@ -63,11 +71,12 @@ static inline void audit_del_list(struct
list_del(&page->shared_info->entry);
}
-static inline int mem_sharing_page_lock(struct page_info *p)
+static inline int mem_sharing_page_lock(struct page_info *p,
+ pg_lock_data_t *l)
{
return 1;
}
-#define mem_sharing_page_unlock(p) ((void)0)
+#define mem_sharing_page_unlock(p, l) ((void)0)
#define get_next_handle() next_handle++;
#else
@@ -85,19 +94,26 @@ static inline int mem_sharing_audit(void
#define audit_add_list(p) ((void)0)
#define audit_del_list(p) ((void)0)
-static inline int mem_sharing_page_lock(struct page_info *pg)
+static inline int mem_sharing_page_lock(struct page_info *pg,
+ pg_lock_data_t *pld)
{
int rc;
+ page_sharing_mm_pre_lock();
rc = page_lock(pg);
if ( rc )
{
preempt_disable();
+ page_sharing_mm_post_lock(&pld->mm_unlock_level,
+ &pld->recurse_count);
}
return rc;
}
-static inline void mem_sharing_page_unlock(struct page_info *pg)
+static inline void mem_sharing_page_unlock(struct page_info *pg,
+ pg_lock_data_t *pld)
{
+ page_sharing_mm_unlock(pld->mm_unlock_level,
+ &pld->recurse_count);
preempt_enable();
page_unlock(pg);
}
@@ -492,7 +508,8 @@ static int page_make_sharable(struct dom
return 0;
}
-static int page_make_private(struct domain *d, struct page_info *page)
+static int page_make_private(struct domain *d, struct page_info *page,
+ pg_lock_data_t *pld)
{
unsigned long expected_type;
@@ -520,9 +537,11 @@ static int page_make_private(struct doma
/* Drop the final typecount */
put_page_and_type(page);
-#ifndef MEM_SHARING_AUDIT
+#if MEM_SHARING_AUDIT
+ (void)pld;
+#else
/* Now that we've dropped the type, we can unlock */
- mem_sharing_page_unlock(page);
+ mem_sharing_page_unlock(page, pld);
#endif
/* Change the owner */
@@ -538,7 +557,8 @@ static int page_make_private(struct doma
return 0;
}
-static inline struct page_info *__grab_shared_page(mfn_t mfn)
+static inline struct page_info *__grab_shared_page(mfn_t mfn,
+ pg_lock_data_t *pld)
{
struct page_info *pg = NULL;
@@ -548,12 +568,12 @@ static inline struct page_info *__grab_s
/* If the page is not validated we can't lock it, and if it's
* not validated it's obviously not shared. */
- if ( !mem_sharing_page_lock(pg) )
+ if ( !mem_sharing_page_lock(pg, pld) )
return NULL;
if ( mem_sharing_lookup(mfn_x(mfn)) == NULL )
{
- mem_sharing_page_unlock(pg);
+ mem_sharing_page_unlock(pg, pld);
return NULL;
}
@@ -570,6 +590,7 @@ int mem_sharing_nominate_page(struct dom
struct page_info *page = NULL; /* gcc... */
int ret;
struct gfn_info *gfn_info;
+ DECLARE_PG_LOCK_DATA(pld);
*phandle = 0UL;
@@ -583,7 +604,7 @@ int mem_sharing_nominate_page(struct dom
/* Return the handle if the page is already shared */
if ( p2m_is_shared(p2mt) ) {
- struct page_info *pg = __grab_shared_page(mfn);
+ struct page_info *pg = __grab_shared_page(mfn, &pld);
if ( !pg )
{
gdprintk(XENLOG_ERR, "Shared p2m entry gfn %lx, but could not "
@@ -592,7 +613,7 @@ int mem_sharing_nominate_page(struct dom
}
*phandle = pg->shared_info->handle;
ret = 0;
- mem_sharing_page_unlock(pg);
+ mem_sharing_page_unlock(pg, &pld);
goto out;
}
@@ -610,7 +631,7 @@ int mem_sharing_nominate_page(struct dom
* race because we're holding the p2m entry, so no one else
* could be nominating this gfn */
ret = -ENOENT;
- if ( !mem_sharing_page_lock(page) )
+ if ( !mem_sharing_page_lock(page, &pld) )
goto out;
/* Initialize the shared state */
@@ -619,7 +640,7 @@ int mem_sharing_nominate_page(struct dom
xmalloc(struct page_sharing_info)) == NULL )
{
/* Making a page private atomically unlocks it */
- BUG_ON(page_make_private(d, page) != 0);
+ BUG_ON(page_make_private(d, page, &pld) != 0);
goto out;
}
page->shared_info->pg = page;
@@ -633,7 +654,7 @@ int mem_sharing_nominate_page(struct dom
{
xfree(page->shared_info);
page->shared_info = NULL;
- BUG_ON(page_make_private(d, page) != 0);
+ BUG_ON(page_make_private(d, page, &pld) != 0);
goto out;
}
@@ -648,7 +669,7 @@ int mem_sharing_nominate_page(struct dom
xfree(page->shared_info);
page->shared_info = NULL;
/* NOTE: We haven't yet added this to the audit list. */
- BUG_ON(page_make_private(d, page) != 0);
+ BUG_ON(page_make_private(d, page, &pld) != 0);
goto out;
}
@@ -657,7 +678,7 @@ int mem_sharing_nominate_page(struct dom
*phandle = page->shared_info->handle;
audit_add_list(page);
- mem_sharing_page_unlock(page);
+ mem_sharing_page_unlock(page, &pld);
ret = 0;
out:
@@ -676,6 +697,7 @@ int mem_sharing_share_pages(struct domai
int ret = -EINVAL;
mfn_t smfn, cmfn;
p2m_type_t smfn_type, cmfn_type;
+ DECLARE_PG_LOCK_DATA(pld);
shr_lock();
@@ -699,28 +721,28 @@ int mem_sharing_share_pages(struct domai
else if ( mfn_x(smfn) < mfn_x(cmfn) )
{
ret = XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID;
- spage = firstpg = __grab_shared_page(smfn);
+ spage = firstpg = __grab_shared_page(smfn, &pld);
if ( spage == NULL )
goto err_out;
ret = XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID;
- cpage = secondpg = __grab_shared_page(cmfn);
+ cpage = secondpg = __grab_shared_page(cmfn, &pld);
if ( cpage == NULL )
{
- mem_sharing_page_unlock(spage);
+ mem_sharing_page_unlock(spage, &pld);
goto err_out;
}
} else {
ret = XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID;
- cpage = firstpg = __grab_shared_page(cmfn);
+ cpage = firstpg = __grab_shared_page(cmfn, &pld);
if ( cpage == NULL )
goto err_out;
ret = XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID;
- spage = secondpg = __grab_shared_page(smfn);
+ spage = secondpg = __grab_shared_page(smfn, &pld);
if ( spage == NULL )
{
- mem_sharing_page_unlock(cpage);
+ mem_sharing_page_unlock(cpage, &pld);
goto err_out;
}
}
@@ -732,15 +754,15 @@ int mem_sharing_share_pages(struct domai
if ( spage->shared_info->handle != sh )
{
ret = XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID;
- mem_sharing_page_unlock(secondpg);
- mem_sharing_page_unlock(firstpg);
+ mem_sharing_page_unlock(secondpg, &pld);
+ mem_sharing_page_unlock(firstpg, &pld);
goto err_out;
}
if ( cpage->shared_info->handle != ch )
{
ret = XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID;
- mem_sharing_page_unlock(secondpg);
- mem_sharing_page_unlock(firstpg);
+ mem_sharing_page_unlock(secondpg, &pld);
+ mem_sharing_page_unlock(firstpg, &pld);
goto err_out;
}
@@ -767,8 +789,8 @@ int mem_sharing_share_pages(struct domai
xfree(cpage->shared_info);
cpage->shared_info = NULL;
- mem_sharing_page_unlock(secondpg);
- mem_sharing_page_unlock(firstpg);
+ mem_sharing_page_unlock(secondpg, &pld);
+ mem_sharing_page_unlock(firstpg, &pld);
/* Free the client page */
if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
@@ -796,6 +818,7 @@ int mem_sharing_unshare_page(struct doma
int last_gfn;
gfn_info_t *gfn_info = NULL;
struct list_head *le;
+ DECLARE_PG_LOCK_DATA(pld);
/* This is one of the reasons why we can't enforce ordering
* between shr_lock and p2m fine-grained locks in mm-lock.
@@ -811,7 +834,7 @@ int mem_sharing_unshare_page(struct doma
return 0;
}
- page = __grab_shared_page(mfn);
+ page = __grab_shared_page(mfn, &pld);
if ( page == NULL )
{
gdprintk(XENLOG_ERR, "Domain p2m is shared, but page is not: "
@@ -848,7 +871,7 @@ gfn_found:
if ( flags & MEM_SHARING_DESTROY_GFN )
{
put_page_and_type(page);
- mem_sharing_page_unlock(page);
+ mem_sharing_page_unlock(page, &pld);
if ( last_gfn &&
test_and_clear_bit(_PGC_allocated, &page->count_info) )
put_page(page);
@@ -861,7 +884,7 @@ gfn_found:
if ( last_gfn )
{
/* Making a page private atomically unlocks it */
- BUG_ON(page_make_private(d, page) != 0);
+ BUG_ON(page_make_private(d, page, &pld) != 0);
goto private_page_found;
}
@@ -869,7 +892,7 @@ gfn_found:
page = alloc_domheap_page(d, 0);
if ( !page )
{
- mem_sharing_page_unlock(old_page);
+ mem_sharing_page_unlock(old_page, &pld);
put_gfn(d, gfn);
mem_sharing_notify_helper(d, gfn);
shr_unlock();
@@ -883,7 +906,7 @@ gfn_found:
unmap_domain_page(t);
BUG_ON(set_shared_p2m_entry(d, gfn, page_to_mfn(page)) == 0);
- mem_sharing_page_unlock(old_page);
+ mem_sharing_page_unlock(old_page, &pld);
put_page_and_type(old_page);
private_page_found:
diff -r 11916fe20dd2 -r c906c616d5ac xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -156,7 +156,23 @@ declare_mm_lock(shr)
#else
-/* We use an efficient per-page lock when AUDIT is not enabled. */
+/* Sharing per page lock
+ *
+ * This is an external lock, not represented by an mm_lock_t. The memory
+ * sharing lock uses it to protect addition and removal of (gfn,domain)
+ * tuples to a shared page. We enforce order here against the p2m lock,
+ * which is taken after the page_lock to change the gfn's p2m entry.
+ *
+ * Note that in sharing audit mode, we use the global page lock above,
+ * instead.
+ *
+ * The lock is recursive because during share we lock two pages. */
+
+declare_mm_order_constraint(per_page_sharing)
+#define page_sharing_mm_pre_lock()   mm_enforce_order_lock_pre_per_page_sharing()
+#define page_sharing_mm_post_lock(l, r) \
+ mm_enforce_order_lock_post_per_page_sharing((l), (r))
+#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
#endif /* MEM_SHARING_AUDIT */
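
(The declare_mm_order_constraint() machinery itself is not shown in
this hunk. Below is a rough, assumed sketch of the semantics behind the
pre/post/unlock hooks; the real mm-locks.h helpers may differ in
detail. It amounts to a per-CPU lock level:)

    /* Per-CPU record of the level of the most recently taken mm lock;
     * illustrative only. */
    static DEFINE_PER_CPU(int, mm_lock_level);

    static inline void enforce_order_pre(int level)
    {
        /* Taking a lock of lower level than one already held would
         * invert the ordering, so catch it before acquisition. */
        BUG_ON(this_cpu(mm_lock_level) > level);
    }

    static inline void enforce_order_post(int level, int *unlock_level,
                                          unsigned short *recurse_count)
    {
        /* Only the outermost acquisition records the level to restore;
         * nested takes of the recursive lock just bump the count. */
        if ( (*recurse_count)++ == 0 )
            *unlock_level = this_cpu(mm_lock_level);
        this_cpu(mm_lock_level) = level;
    }

    static inline void enforce_order_unlock(int unlock_level,
                                            unsigned short *recurse_count)
    {
        BUG_ON(*recurse_count == 0);
        /* Restore the saved level only when the outermost hold drops. */
        if ( --(*recurse_count) == 0 )
            this_cpu(mm_lock_level) = unlock_level;
    }

This is also why the lock is recursive: mem_sharing_share_pages() locks
two pages with a single pg_lock_data_t, and the count ensures the level
is restored only on the final unlock.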
diff -r 11916fe20dd2 -r c906c616d5ac xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -351,7 +351,8 @@ void clear_superpage_mark(struct page_in
* backing. Nesting may happen when sharing (and locking) two pages -- deadlock
* is avoided by locking pages in increasing order.
* Memory sharing may take the p2m_lock within a page_lock/unlock
- * critical section.
+ * critical section. We enforce ordering between page_lock and p2m_lock using an
+ * mm-locks.h construct.
*
* These two users (pte serialization and memory sharing) do not collide, since
* sharing is only supported for hvm guests, which do not perform pv pte updates.
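
As that comment says, deadlock between the two page locks taken during
a share is avoided by always locking in increasing MFN order. Condensed
from the mem_sharing_share_pages() hunk above (a sketch, with the error
handling and the equal-MFN case elided):

    /* Whichever page has the lower MFN is locked first, so two CPUs
     * sharing the same pair never each hold one lock while waiting
     * for the other. */
    if ( mfn_x(smfn) < mfn_x(cmfn) )
    {
        firstpg  = __grab_shared_page(smfn, &pld);
        secondpg = __grab_shared_page(cmfn, &pld);
    }
    else
    {
        firstpg  = __grab_shared_page(cmfn, &pld);
        secondpg = __grab_shared_page(smfn, &pld);
    }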