|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH] xen/x86: Fix memory leak in vcpu_create() error path
Various paths in vcpu_create() end up calling paging_update_paging_modes(),
which eventually allocates a monitor pagetable if one doesn't exist.
However, an error in vcpu_create() results in the vcpu being cleaned up
locally, and not put onto the domain's vcpu list. Therefore, the monitor
table is not freed by {hap,shadow}_teardown()'s loop. This is caught by
assertions later that we've successfully freed the entire hap/shadow memory
pool.
The per-vcpu loops in the domain teardown logic are conceptually wrong, but
exist due to insufficient structure in the existing logic.
Break paging_vcpu_teardown() out of paging_teardown(), with mirrored breakouts
in the hap/shadow code, and use it from arch_vcpu_create()'s error path. This
fixes the memory leak.
The new {hap,shadow}_vcpu_teardown() must be idempotent, and are written to be
as tolerant as possible, with the minimum number of safety checks possible.
In particular, drop the mfn_valid() check - if junk is in these fields, then
Xen is going to explode anyway.
Reported-by: Michał Leszczyński <michal.leszczynski@xxxxxxx>
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Michał Leszczyński <michal.leszczynski@xxxxxxx>
This is a minimal patch which ought to be safe to backport. The whole cleanup
infrastructure is a mess.
---
xen/arch/x86/domain.c | 1 +
xen/arch/x86/mm/hap/hap.c | 39 ++++++++++++++++++-------------
xen/arch/x86/mm/paging.c | 8 +++++++
xen/arch/x86/mm/shadow/common.c | 52 ++++++++++++++++++++++++-----------------
xen/include/asm-x86/hap.h | 1 +
xen/include/asm-x86/paging.h | 3 ++-
xen/include/asm-x86/shadow.h | 3 ++-
7 files changed, 67 insertions(+), 40 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index e8e91cf080..b8f5b1f5b4 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -603,6 +603,7 @@ int arch_vcpu_create(struct vcpu *v)
return rc;
fail:
+ paging_vcpu_teardown(v);
vcpu_destroy_fpu(v);
xfree(v->arch.msrs);
v->arch.msrs = NULL;
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 4eedd1a995..737821a166 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -563,30 +563,37 @@ void hap_final_teardown(struct domain *d)
paging_unlock(d);
}
+void hap_vcpu_teardown(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+ mfn_t mfn;
+
+ paging_lock(d);
+
+ if ( !paging_mode_hap(d) || !v->arch.paging.mode )
+ goto out;
+
+ mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
+ if ( mfn_x(mfn) )
+ hap_destroy_monitor_table(v, mfn);
+ v->arch.hvm.monitor_table = pagetable_null();
+
+ out:
+ paging_unlock(d);
+}
+
void hap_teardown(struct domain *d, bool *preempted)
{
struct vcpu *v;
- mfn_t mfn;
ASSERT(d->is_dying);
ASSERT(d != current->domain);
- paging_lock(d); /* Keep various asserts happy */
+ /* TODO - Remove when the teardown path is better structured. */
+ for_each_vcpu ( d, v )
+ hap_vcpu_teardown(v);
- if ( paging_mode_enabled(d) )
- {
- /* release the monitor table held by each vcpu */
- for_each_vcpu ( d, v )
- {
- if ( paging_get_hostmode(v) && paging_mode_external(d) )
- {
- mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
- if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
- hap_destroy_monitor_table(v, mfn);
- v->arch.hvm.monitor_table = pagetable_null();
- }
- }
- }
+ paging_lock(d); /* Keep various asserts happy */
if ( d->arch.paging.hap.total_pages != 0 )
{
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 695372783d..d5e967fcd5 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -794,6 +794,14 @@ long paging_domctl_continuation(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
}
#endif /* CONFIG_PV_SHIM_EXCLUSIVE */
+void paging_vcpu_teardown(struct vcpu *v)
+{
+ if ( hap_enabled(v->domain) )
+ hap_vcpu_teardown(v);
+ else
+ shadow_vcpu_teardown(v);
+}
+
/* Call when destroying a domain */
int paging_teardown(struct domain *d)
{
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 7c7204fd34..ea51068530 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2775,6 +2775,32 @@ int shadow_enable(struct domain *d, u32 mode)
return rv;
}
+void shadow_vcpu_teardown(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+
+ paging_lock(d);
+
+ if ( !paging_mode_shadow(d) || !v->arch.paging.mode )
+ goto out;
+
+ v->arch.paging.mode->shadow.detach_old_tables(v);
+#ifdef CONFIG_HVM
+ if ( shadow_mode_external(d) )
+ {
+ mfn_t mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
+
+ if ( mfn_x(mfn) )
+ v->arch.paging.mode->shadow.destroy_monitor_table(v, mfn);
+
+ v->arch.hvm.monitor_table = pagetable_null();
+ }
+#endif
+
+ out:
+ paging_unlock(d);
+}
+
void shadow_teardown(struct domain *d, bool *preempted)
/* Destroy the shadow pagetables of this domain and free its shadow memory.
* Should only be called for dying domains. */
@@ -2785,29 +2811,11 @@ void shadow_teardown(struct domain *d, bool *preempted)
ASSERT(d->is_dying);
ASSERT(d != current->domain);
- paging_lock(d);
-
- if ( shadow_mode_enabled(d) )
- {
- /* Release the shadow and monitor tables held by each vcpu */
- for_each_vcpu(d, v)
- {
- if ( v->arch.paging.mode )
- {
- v->arch.paging.mode->shadow.detach_old_tables(v);
-#ifdef CONFIG_HVM
- if ( shadow_mode_external(d) )
- {
- mfn_t mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
+ /* TODO - Remove when the teardown path is better structured. */
+ for_each_vcpu ( d, v )
+ shadow_vcpu_teardown(v);
- if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
- v->arch.paging.mode->shadow.destroy_monitor_table(v, mfn);
- v->arch.hvm.monitor_table = pagetable_null();
- }
-#endif /* CONFIG_HVM */
- }
- }
- }
+ paging_lock(d);
#if (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC))
/* Free the virtual-TLB array attached to each vcpu */
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index d489df3812..90dece29de 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -36,6 +36,7 @@ int hap_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl);
int hap_enable(struct domain *d, u32 mode);
void hap_final_teardown(struct domain *d);
+void hap_vcpu_teardown(struct vcpu *v);
void hap_teardown(struct domain *d, bool *preempted);
void hap_vcpu_init(struct vcpu *v);
int hap_track_dirty_vram(struct domain *d,
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index b803efa7b5..6b8ed80c64 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -234,7 +234,8 @@ int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
/* Helper hypercall for dealing with continuations. */
long paging_domctl_continuation(XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
-/* Call when destroying a domain */
+/* Call when destroying a vcpu/domain */
+void paging_vcpu_teardown(struct vcpu *v);
int paging_teardown(struct domain *d);
/* Call once all of the references to the domain have gone away */
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 76e47f257f..29a86ed78e 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -74,7 +74,8 @@ int shadow_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl);
-/* Call when destroying a domain */
+/* Call when destroying a vcpu/domain */
+void shadow_vcpu_teardown(struct vcpu *v);
void shadow_teardown(struct domain *d, bool *preempted);
/* Call once all of the references to the domain have gone away */
--
2.11.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |