|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH V6 3/4] x86/mm: allocate logdirty_ranges for altp2ms
This patch is a pre-requisite for the one fixing VGA logdirty
freezes when using altp2m. It only concerns itself with the
ranges allocation / deallocation / initialization part. While
touching the code, I've switched global_logdirty from bool_t
to bool.
p2m_reset_altp2m() has been refactored to reduce code
repetition, and it now takes the p2m lock. Similar
refactoring has been done with p2m_activate_altp2m().
Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Changes since V5:
- This is the second part of the former second patch split
requested by George.
- p2m_activate_altp2m() now puts the "if p2m_init_altp2m_logdirty()
succeeds, then call p2m_init_altp2m_ept()" logic in one place.
- p2m_flush_altp2m() now frees the logdirty ranges.
- Put the p2m_flush_table() -> ept_p2m_uninit() ->
ept_p2m_init() sequence exclusively in p2m_reset_altp2m() and
refactored it to also conditionally free the logdirty ranges
and reset {min,max}_remapped_gfn.
---
xen/arch/x86/mm/p2m.c | 94 ++++++++++++++++++++++++++++++++---------------
xen/include/asm-x86/p2m.h | 2 +-
2 files changed, 65 insertions(+), 31 deletions(-)
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 418ff85..abdf443 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2282,6 +2282,34 @@ bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
return 1;
}
+static void p2m_reset_altp2m(struct domain *d, unsigned int idx,
+ bool reset_remapped, bool free_logdirty_ranges)
+{
+ struct p2m_domain *p2m;
+
+ ASSERT(idx < MAX_ALTP2M);
+ p2m = d->arch.altp2m_p2m[idx];
+
+ p2m_lock(p2m);
+
+ p2m_flush_table_locked(p2m);
+ /* Uninit and reinit ept to force TLB shootdown */
+
+ if ( free_logdirty_ranges )
+ p2m_free_logdirty(p2m);
+
+ ept_p2m_uninit(p2m);
+ ept_p2m_init(p2m);
+
+ if ( reset_remapped )
+ {
+ p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
+ p2m->max_remapped_gfn = 0;
+ }
+
+ p2m_unlock(p2m);
+}
+
void p2m_flush_altp2m(struct domain *d)
{
unsigned int i;
@@ -2290,16 +2318,40 @@ void p2m_flush_altp2m(struct domain *d)
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- p2m_flush_table(d->arch.altp2m_p2m[i]);
- /* Uninit and reinit ept to force TLB shootdown */
- ept_p2m_uninit(d->arch.altp2m_p2m[i]);
- ept_p2m_init(d->arch.altp2m_p2m[i]);
+ p2m_reset_altp2m(d, i, false, true);
d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
}
altp2m_list_unlock(d);
}
+static int p2m_init_altp2m_logdirty(struct p2m_domain *p2m)
+{
+ struct p2m_domain *hostp2m = p2m_get_hostp2m(p2m->domain);
+ int rc = p2m_init_logdirty(p2m);
+
+ if ( rc )
+ return rc;
+
+ /* The following is really just a rangeset copy. */
+ return rangeset_merge(p2m->logdirty_ranges, hostp2m->logdirty_ranges);
+}
+
+static int p2m_activate_altp2m(struct domain *d, unsigned int idx)
+{
+ int rc;
+
+ ASSERT(idx < MAX_ALTP2M);
+ rc = p2m_init_altp2m_logdirty(d->arch.altp2m_p2m[idx]);
+
+ if ( rc )
+ return rc;
+
+ p2m_init_altp2m_ept(d, idx);
+
+ return 0;
+}
+
int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
{
int rc = -EINVAL;
@@ -2310,10 +2362,7 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int
idx)
altp2m_list_lock(d);
if ( d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
- {
- p2m_init_altp2m_ept(d, idx);
- rc = 0;
- }
+ rc = p2m_activate_altp2m(d, idx);
altp2m_list_unlock(d);
return rc;
@@ -2331,9 +2380,10 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
continue;
- p2m_init_altp2m_ept(d, i);
- *idx = i;
- rc = 0;
+ rc = p2m_activate_altp2m(d, i);
+
+ if ( !rc )
+ *idx = i;
break;
}
@@ -2360,10 +2410,7 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned
int idx)
if ( !_atomic_read(p2m->active_vcpus) )
{
- p2m_flush_table(d->arch.altp2m_p2m[idx]);
- /* Uninit and reinit ept to force TLB shootdown */
- ept_p2m_uninit(d->arch.altp2m_p2m[idx]);
- ept_p2m_init(d->arch.altp2m_p2m[idx]);
+ p2m_reset_altp2m(d, idx, false, true);
d->arch.altp2m_eptp[idx] = mfn_x(INVALID_MFN);
rc = 0;
}
@@ -2488,16 +2535,6 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int
idx,
return rc;
}
-static void p2m_reset_altp2m(struct p2m_domain *p2m)
-{
- p2m_flush_table(p2m);
- /* Uninit and reinit ept to force TLB shootdown */
- ept_p2m_uninit(p2m);
- ept_p2m_init(p2m);
- p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
- p2m->max_remapped_gfn = 0;
-}
-
int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
mfn_t mfn, unsigned int page_order,
p2m_type_t p2mt, p2m_access_t p2ma)
@@ -2531,7 +2568,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t
gfn,
{
if ( !reset_count++ )
{
- p2m_reset_altp2m(p2m);
+ p2m_reset_altp2m(d, i, true, false);
last_reset_idx = i;
}
else
@@ -2545,10 +2582,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t
gfn,
d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
continue;
- p2m = d->arch.altp2m_p2m[i];
- p2m_lock(p2m);
- p2m_reset_altp2m(p2m);
- p2m_unlock(p2m);
+ p2m_reset_altp2m(d, i, true, false);
}
ret = 0;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index ac33f50..c7f5710 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -222,7 +222,7 @@ struct p2m_domain {
struct rangeset *logdirty_ranges;
/* Host p2m: Global log-dirty mode enabled for the domain. */
- bool_t global_logdirty;
+ bool global_logdirty;
/* Host p2m: when this flag is set, don't flush all the nested-p2m
* tables on every host-p2m change. The setter of this flag
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |