|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [RFC 09/16] xen/arm: p2m: Introduce a function to resolve translation fault
Currently a Stage-2 translation fault could happen in the following cases:
1) MMIO emulation
2) When the page-tables are being updated using Break-Before-Make
3) Page not mapped
A follow-up patch will re-purpose the valid bit in an entry to generate a
translation fault. This would be used to perform an action on each entry in
order to track pages used during a given period.
A new function is introduced to try to resolve a translation fault. This
will include 2) and the new way to generate fault explained above.
To avoid invalidating all the page-table entries in one go, it is
possible to invalidate only the top-level table and then, on trap,
invalidate the table one level down. This will be repeated until a
block/page entry has been reached.
At the moment, no action is taken when reaching a block/page entry
other than setting the valid bit to 1.
Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
---
xen/arch/arm/p2m.c | 127 +++++++++++++++++++++++++++++++++++++++++++++++++++
xen/arch/arm/traps.c | 7 +--
2 files changed, 131 insertions(+), 3 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index ec956bc151..af445d3313 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1043,6 +1043,133 @@ int p2m_set_entry(struct p2m_domain *p2m,
return rc;
}
+/* Invalidate all entries in the table. The p2m should be write locked. */
+static void p2m_invalidate_table(struct p2m_domain *p2m, mfn_t mfn)
+{
+ lpae_t *table;
+ unsigned int i;
+
+ ASSERT(p2m_is_write_locked(p2m));
+
+ table = map_domain_page(mfn);
+
+ for ( i = 0; i < LPAE_ENTRIES; i++ )
+ {
+ lpae_t pte = table[i];
+
+ pte.p2m.valid = 0;
+
+ p2m_write_pte(&table[i], pte, p2m->clean_pte);
+ }
+
+ unmap_domain_page(table);
+
+ p2m->need_flush = true;
+}
+
+bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ unsigned int level = 0;
+ bool resolved = false;
+ lpae_t entry, *table;
+ paddr_t addr = gfn_to_gaddr(gfn);
+
+ /* Convenience aliases */
+ const unsigned int offsets[4] = {
+ zeroeth_table_offset(addr),
+ first_table_offset(addr),
+ second_table_offset(addr),
+ third_table_offset(addr)
+ };
+
+ p2m_write_lock(p2m);
+
+ /* This gfn is higher than the highest the p2m map currently holds */
+ if ( gfn_x(gfn) > gfn_x(p2m->max_mapped_gfn) )
+ goto out;
+
+ table = p2m_get_root_pointer(p2m, gfn);
+ /*
+ * The table should always be non-NULL because the gfn is below
+ * p2m->max_mapped_gfn and the root table pages are always present.
+ */
+ BUG_ON(table == NULL);
+
+ /*
+ * Go down the page-tables until an entry has the valid bit unset or
+ * a block/page entry has been hit.
+ */
+ for ( level = P2M_ROOT_LEVEL; level <= 3; level++ )
+ {
+ int rc;
+
+ entry = table[offsets[level]];
+
+ if ( level == 3 )
+ break;
+
+ /* Stop as soon as we hit an entry with the valid bit unset. */
+ if ( !lpae_is_valid(entry) )
+ break;
+
+ rc = p2m_next_level(p2m, true, level, &table, offsets[level]);
+ if ( rc == GUEST_TABLE_MAP_FAILED )
+ goto out_unmap;
+ else if ( rc != GUEST_TABLE_NORMAL_PAGE )
+ break;
+ }
+
+ /*
+ * If the valid bit of the entry is set, it means someone was playing with
+ * the Stage-2 page table. Nothing to do and mark the fault as resolved.
+ */
+ if ( lpae_is_valid(entry) )
+ {
+ resolved = true;
+ goto out_unmap;
+ }
+
+ /*
+ * The valid bit is unset. If the entry is still not valid then the fault
+ * cannot be resolved, exit and report it.
+ */
+ if ( !p2m_is_valid(entry) )
+ goto out_unmap;
+
+ /*
+ * Now we have an entry with valid bit unset, but still valid from
+ * the P2M point of view.
+ *
+ * For entry pointing to a table, the table will be invalidated.
+ * For entry pointing to a block/page, no work to do for now.
+ */
+ if ( lpae_is_table(entry, level) )
+ p2m_invalidate_table(p2m, lpae_get_mfn(entry));
+
+ /*
+ * Now that the work on the entry is done, set the valid bit to prevent
+ * another fault on that entry.
+ */
+ resolved = true;
+ entry.p2m.valid = 1;
+
+ p2m_write_pte(table + offsets[level], entry, p2m->clean_pte);
+
+ /*
+ * No need to flush the TLBs as the modified entry had the valid bit
+ * unset.
+ */
+
+out_unmap:
+ unmap_domain_page(table);
+
+out:
+ p2m_write_unlock(p2m);
+
+ return resolved;
+}
+
static inline int p2m_insert_mapping(struct domain *d,
gfn_t start_gfn,
unsigned long nr,
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index b40798084d..169b57cb6b 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1882,6 +1882,8 @@ static bool try_map_mmio(gfn_t gfn)
return !map_regions_p2mt(d, gfn, 1, mfn, p2m_mmio_direct_c);
}
+bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn);
+
static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
const union hsr hsr)
{
@@ -1894,7 +1896,6 @@ static void do_trap_stage2_abort_guest(struct
cpu_user_regs *regs,
vaddr_t gva;
paddr_t gpa;
uint8_t fsc = xabt.fsc & ~FSC_LL_MASK;
- mfn_t mfn;
bool is_data = (hsr.ec == HSR_EC_DATA_ABORT_LOWER_EL);
/*
@@ -1977,8 +1978,8 @@ static void do_trap_stage2_abort_guest(struct
cpu_user_regs *regs,
* with the Stage-2 page table. Walk the Stage-2 PT to check
* if the entry exists. If it's the case, return to the guest
*/
- mfn = gfn_to_mfn(current->domain, gaddr_to_gfn(gpa));
- if ( !mfn_eq(mfn, INVALID_MFN) )
+ if ( p2m_resolve_translation_fault(current->domain,
+ gaddr_to_gfn(gpa)) )
return;
if ( is_data && try_map_mmio(gaddr_to_gfn(gpa)) )
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |