[Xen-devel] [PATCH 6 of 7] x86/mm: Refactor possibly deadlocking get_gfn calls
xen/arch/x86/hvm/emulate.c | 33 ++++++++++-------------
xen/arch/x86/mm/mem_sharing.c | 24 +++++++---------
xen/include/asm-x86/p2m.h | 60 +++++++++++++++++++++++++++++++++++++++++++
3 files changed, 85 insertions(+), 32 deletions(-)
When calling get_gfn multiple times on different gfns in the same function, we
can easily deadlock if p2m lookups are locked. Thus, refactor these calls to
enforce simple deadlock-avoidance rules (illustrated by the sketch below):
- Lowest-numbered domain first
- Within the same domain, lowest-numbered gfn first
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxx>
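
To illustrate the ordering rule, a minimal self-contained sketch (the struct
and function names here are hypothetical, not Xen code): any two callers that
follow it acquire the two p2m locks in the same global order, so neither can
end up holding one lock while waiting for the other.

#include <stdbool.h>

struct req { int domain_id; unsigned long gfn; };

/* Returns true if a's p2m entry must be locked before b's. */
static bool lock_first(const struct req *a, const struct req *b)
{
    if ( a->domain_id != b->domain_id )
        return a->domain_id < b->domain_id; /* lowest domain first */
    return a->gfn <= b->gfn;                /* then lowest gfn first */
}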
diff -r 316d227dd526 -r 7fe1bb9208df xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -660,12 +660,13 @@ static int hvmemul_rep_movs(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- unsigned long saddr, daddr, bytes, sgfn, dgfn;
+ unsigned long saddr, daddr, bytes;
paddr_t sgpa, dgpa;
uint32_t pfec = PFEC_page_present;
- p2m_type_t p2mt;
+ p2m_type_t sp2mt, dp2mt;
int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
char *buf;
+ struct two_gfns tg;
rc = hvmemul_virtual_to_linear(
src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
@@ -693,26 +694,23 @@ static int hvmemul_rep_movs(
if ( rc != X86EMUL_OKAY )
return rc;
- /* XXX In a fine-grained p2m locking scenario, we need to sort this
- * get_gfn's, or else we might deadlock */
- sgfn = sgpa >> PAGE_SHIFT;
- (void)get_gfn(current->domain, sgfn, &p2mt);
- if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
+ get_two_gfns(current->domain, sgpa >> PAGE_SHIFT, &sp2mt, NULL, NULL,
+ current->domain, dgpa >> PAGE_SHIFT, &dp2mt, NULL, NULL,
+ p2m_guest, &tg);
+
+ if ( !p2m_is_ram(sp2mt) && !p2m_is_grant(sp2mt) )
{
rc = hvmemul_do_mmio(
sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
- put_gfn(current->domain, sgfn);
+ put_two_gfns(&tg);
return rc;
}
- dgfn = dgpa >> PAGE_SHIFT;
- (void)get_gfn(current->domain, dgfn, &p2mt);
- if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
+ if ( !p2m_is_ram(dp2mt) && !p2m_is_grant(dp2mt) )
{
rc = hvmemul_do_mmio(
dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
- put_gfn(current->domain, sgfn);
- put_gfn(current->domain, dgfn);
+ put_two_gfns(&tg);
return rc;
}
@@ -730,8 +728,7 @@ static int hvmemul_rep_movs(
*/
if ( ((dgpa + bytes_per_rep) > sgpa) && (dgpa < (sgpa + bytes)) )
{
- put_gfn(current->domain, sgfn);
- put_gfn(current->domain, dgfn);
+ put_two_gfns(&tg);
return X86EMUL_UNHANDLEABLE;
}
@@ -743,8 +740,7 @@ static int hvmemul_rep_movs(
buf = xmalloc_bytes(bytes);
if ( buf == NULL )
{
- put_gfn(current->domain, sgfn);
- put_gfn(current->domain, dgfn);
+ put_two_gfns(&tg);
return X86EMUL_UNHANDLEABLE;
}
@@ -757,8 +753,7 @@ static int hvmemul_rep_movs(
rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
xfree(buf);
- put_gfn(current->domain, sgfn);
- put_gfn(current->domain, dgfn);
+ put_two_gfns(&tg);
if ( rc == HVMCOPY_gfn_paged_out )
return X86EMUL_RETRY;
diff -r 316d227dd526 -r 7fe1bb9208df xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -727,11 +727,11 @@ int mem_sharing_share_pages(struct domai
int ret = -EINVAL;
mfn_t smfn, cmfn;
p2m_type_t smfn_type, cmfn_type;
+ struct two_gfns tg;
- /* XXX if sd == cd handle potential deadlock by ordering
- * the get_ and put_gfn's */
- smfn = get_gfn(sd, sgfn, &smfn_type);
- cmfn = get_gfn(cd, cgfn, &cmfn_type);
+ get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
+ cd, cgfn, &cmfn_type, NULL, &cmfn,
+ p2m_query, &tg);
/* This tricky business is to avoid two callers deadlocking if
* grabbing pages in opposite client/source order */
@@ -828,8 +828,7 @@ int mem_sharing_share_pages(struct domai
ret = 0;
err_out:
- put_gfn(cd, cgfn);
- put_gfn(sd, sgfn);
+ put_two_gfns(&tg);
return ret;
}
@@ -843,11 +842,11 @@ int mem_sharing_add_to_physmap(struct do
struct gfn_info *gfn_info;
struct p2m_domain *p2m = p2m_get_hostp2m(cd);
p2m_access_t a;
-
- /* XXX if sd == cd handle potential deadlock by ordering
- * the get_ and put_gfn's */
- smfn = get_gfn_query(sd, sgfn, &smfn_type);
- cmfn = get_gfn_type_access(p2m, cgfn, &cmfn_type, &a, p2m_query, NULL);
+ struct two_gfns tg;
+
+ get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
+ cd, cgfn, &cmfn_type, &a, &cmfn,
+ p2m_query, &tg);
/* Get the source shared page, check and lock */
ret = XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID;
@@ -897,8 +896,7 @@ int mem_sharing_add_to_physmap(struct do
err_unlock:
mem_sharing_page_unlock(spage);
err_out:
- put_gfn(cd, cgfn);
- put_gfn(sd, sgfn);
+ put_two_gfns(&tg);
return ret;
}
diff -r 316d227dd526 -r 7fe1bb9208df xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -378,6 +378,66 @@ static inline unsigned long mfn_to_gfn(s
return mfn_x(mfn);
}
+/* Deadlock-avoidance scheme when calling get_gfn on different gfns */
+struct two_gfns {
+ struct domain *first_domain;
+ unsigned long first_gfn;
+ struct domain *second_domain;
+ unsigned long second_gfn;
+};
+
+#define assign_pointers(dest, source) \
+do { \
+ dest ## _mfn = (source ## mfn) ? (source ## mfn) : &__ ## dest ## _mfn; \
+ dest ## _a = (source ## a) ? (source ## a) : &__ ## dest ## _a; \
+ dest ## _t = (source ## t) ? (source ## t) : &__ ## dest ## _t; \
+} while(0)
+
+/* Returns mfn, type and access for potential caller consumption, but any
+ * of those can be NULL */
+static inline void get_two_gfns(struct domain *rd, unsigned long rgfn,
+ p2m_type_t *rt, p2m_access_t *ra, mfn_t *rmfn, struct domain *ld,
+ unsigned long lgfn, p2m_type_t *lt, p2m_access_t *la, mfn_t *lmfn,
+ p2m_query_t q, struct two_gfns *rval)
+{
+ mfn_t *first_mfn, *second_mfn, __first_mfn, __second_mfn;
+ p2m_access_t *first_a, *second_a, __first_a, __second_a;
+ p2m_type_t *first_t, *second_t, __first_t, __second_t;
+
+ /* Sort by domain, if same domain by gfn */
+ if ( (rd->domain_id < ld->domain_id) || ((rd == ld) && (rgfn <= lgfn)) )
+ {
+ rval->first_domain = rd;
+ rval->first_gfn = rgfn;
+ rval->second_domain = ld;
+ rval->second_gfn = lgfn;
+ assign_pointers(first, r);
+ assign_pointers(second, l);
+ } else {
+ rval->first_domain = ld;
+ rval->first_gfn = lgfn;
+ rval->second_domain = rd;
+ rval->second_gfn = rgfn;
+ assign_pointers(first, l);
+ assign_pointers(second, r);
+ }
+
+ /* Now do the gets */
+ *first_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
+ rval->first_gfn, first_t, first_a, q, NULL);
+ *second_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
+ rval->second_gfn, second_t, second_a, q, NULL);
+}
+
+static inline void put_two_gfns(struct two_gfns *arg)
+{
+ if ( !arg )
+ return;
+
+ put_gfn(arg->second_domain, arg->second_gfn);
+ put_gfn(arg->first_domain, arg->first_gfn);
+}
+
/* Init the datastructures for later use by the p2m code */
int p2m_init(struct domain *d);
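
To illustrate the intended calling pattern for the new helpers, a sketch of a
hypothetical caller (copy_between_gfns and its arguments are placeholders, not
Xen code); as the comment above get_two_gfns() notes, any output pointer the
caller does not need may be passed as NULL:

static int copy_between_gfns(struct domain *sd, unsigned long sgfn,
                             struct domain *dd, unsigned long dgfn)
{
    p2m_type_t st, dt;
    mfn_t smfn, dmfn;
    struct two_gfns tg;

    /* Takes both p2m entries in deadlock-safe (domain, gfn) order. */
    get_two_gfns(sd, sgfn, &st, NULL, &smfn,
                 dd, dgfn, &dt, NULL, &dmfn,
                 p2m_query, &tg);

    /* ... operate on smfn/dmfn while both entries are held ... */

    /* Releases in reverse order of acquisition. */
    put_two_gfns(&tg);
    return 0;
}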