|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v6 2/5] xen/arm: introduce a generic p2m walker and use it in p2m_lookup
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Changes in v6:
- improve p2m_walker description;
- cast 1UL to (paddr_t);
- handle the first level within the loop;
- use literal 1, 2, 3 for the levels;
- add missing continue in case of superpages;
- handle third level leaves with !table as errors;
- ASSERT second and third are not NULL after being mapped.
Changes in v5:
- align tests;
- comment p2m_walker;
- fix return codes in p2m_walker;
- handle superpages in p2m_walker;
- rename _p2m_lookup to p2m_lookup_f.
---
xen/arch/arm/p2m.c | 134 ++++++++++++++++++++++++++++++++++++++++++----------
1 files changed, 108 insertions(+), 26 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 307c6d4..05048c2 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -31,48 +31,130 @@ void p2m_load_VTTBR(struct domain *d)
}
/*
- * Lookup the MFN corresponding to a domain's PFN.
+ * d: domain p2m to walk
+ * paddr: the guest start physical address
+ * order: page order
+ * func: function to call for each stage-2 lpae_t leaf entry found
+ * arg: opaque pointer to pass to func
*
- * There are no processor functions to do a stage 2 only lookup therefore we
- * do a a software walk.
*/
-paddr_t p2m_lookup(struct domain *d, paddr_t paddr)
+static int p2m_walker(struct domain *d, paddr_t paddr, unsigned int order,
+ int (*func)(lpae_t *pte, void *arg, int level), void *arg)
{
+ lpae_t *first = NULL, *second = NULL, *third = NULL;
struct p2m_domain *p2m = &d->arch.p2m;
- lpae_t pte, *first = NULL, *second = NULL, *third = NULL;
- paddr_t maddr = INVALID_PADDR;
+ int rc = -EFAULT, level = 1;
+ unsigned long cur_first_offset = ~0, cur_second_offset = ~0;
+ paddr_t pend = paddr + ((paddr_t)1UL << order);
spin_lock(&p2m->lock);
first = __map_domain_page(p2m->first_level);
- pte = first[first_table_offset(paddr)];
- if ( !pte.p2m.valid || !pte.p2m.table )
- goto done;
+ if ( !first )
+ goto err;
- second = map_domain_page(pte.p2m.base);
- pte = second[second_table_offset(paddr)];
- if ( !pte.p2m.valid || !pte.p2m.table )
- goto done;
+ while ( paddr < pend )
+ {
+ rc = -EFAULT;
+ level = 1;
- third = map_domain_page(pte.p2m.base);
- pte = third[third_table_offset(paddr)];
+ if ( !first[first_table_offset(paddr)].p2m.valid )
+ goto err;
- /* This bit must be one in the level 3 entry */
- if ( !pte.p2m.table )
- pte.bits = 0;
+ if ( !first[first_table_offset(paddr)].p2m.table )
+ {
+ rc = func(&first[first_table_offset(paddr)], arg, level);
+ if ( rc != 0 )
+ goto err;
+ paddr += FIRST_SIZE;
+ continue;
+ }
-done:
- if ( pte.p2m.valid )
- maddr = (pte.bits & PADDR_MASK & PAGE_MASK) | (paddr & ~PAGE_MASK);
+ if ( cur_first_offset != first_table_offset(paddr) )
+ {
+ if (second) unmap_domain_page(second);
+ second = map_domain_page(first[first_table_offset(paddr)].p2m.base);
+ cur_first_offset = first_table_offset(paddr);
+ }
+ level = 2;
+ ASSERT(second != NULL);
+ rc = -EFAULT;
+ if ( !second[second_table_offset(paddr)].p2m.valid )
+ goto err;
+ if ( !second[second_table_offset(paddr)].p2m.table )
+ {
+ rc = func(&second[second_table_offset(paddr)], arg, level);
+ if ( rc != 0 )
+ goto err;
+ paddr += SECOND_SIZE;
+ continue;
+ }
- if (third) unmap_domain_page(third);
- if (second) unmap_domain_page(second);
- if (first) unmap_domain_page(first);
+ if ( cur_second_offset != second_table_offset(paddr) )
+ {
+ if (third) unmap_domain_page(third);
+ third = map_domain_page(second[second_table_offset(paddr)].p2m.base);
+ cur_second_offset = second_table_offset(paddr);
+ }
+ level = 3;
+ ASSERT(third != NULL);
+ rc = -EFAULT;
+ if ( !third[third_table_offset(paddr)].p2m.table ||
+ !third[third_table_offset(paddr)].p2m.valid )
+ goto err;
+
+ rc = func(&third[third_table_offset(paddr)], arg, level);
+ if ( rc != 0 )
+ goto err;
+
+ paddr += PAGE_SIZE;
+ }
+
+ rc = 0;
+
+err:
+ if ( third ) unmap_domain_page(third);
+ if ( second ) unmap_domain_page(second);
+ if ( first ) unmap_domain_page(first);
spin_unlock(&p2m->lock);
- return maddr;
+ return rc;
+}
+
+struct p2m_lookup_t {
+ paddr_t paddr;
+ paddr_t maddr;
+};
+
+static int p2m_lookup_f(lpae_t *ptep, void *arg, int level)
+{
+ lpae_t pte;
+ struct p2m_lookup_t *p2m = (struct p2m_lookup_t *)arg;
+ ASSERT(level == 3);
+
+ pte = *ptep;
+
+ p2m->maddr = (pte.bits & PADDR_MASK & PAGE_MASK) |
+ (p2m->paddr & ~PAGE_MASK);
+ return 0;
+}
+/*
+ * Lookup the MFN corresponding to a domain's PFN.
+ *
+ * There are no processor functions to do a stage 2 only lookup therefore we
+ * do a software walk.
+ */
+paddr_t p2m_lookup(struct domain *d, paddr_t paddr)
+{
+ struct p2m_lookup_t p2m;
+ p2m.paddr = paddr;
+ p2m.maddr = INVALID_PADDR;
+
+ p2m_walker(d, paddr, 0, p2m_lookup_f, &p2m);
+
+ return p2m.maddr;
}
int guest_physmap_mark_populate_on_demand(struct domain *d,
--
1.7.2.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |