|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v1 1/3] xen/arm: generalize per-page GFN storage beyond xenheap pages
As preparation for fixing mfn_to_gfn() on ARM, we extend the existing GFN
field in page_info's type_info to be usable for pages other than xenheap ones.
Another use will be introduced later for stolen pages in memory exchange.
Introduce general-purpose page_get_gfn() and page_set_gfn() helpers
that read and write the GFN stored in type_info. The old
page_get_xenheap_gfn() and page_set_xenheap_gfn() are retained as thin
wrappers with their xenheap ASSERTs, so all current callers remain unchanged.
Also introduce PGT_INVALID_GFN as the general sentinel, with
PGT_INVALID_XENHEAP_GFN aliased to it for backward compatibility.
Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
---
xen/arch/arm/include/asm/mm.h | 37 +++++++++++++++++++++++------------
1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 72a6928624..d1873ec212 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -113,18 +113,21 @@ struct page_info
#define PGT_count_mask PG_mask(3, 3)
/*
- * Stored in bits [28:0] (arm32) or [60:0] (arm64) GFN if page is xenheap page.
+ * Stored in bits [28:0] (arm32) or [60:0] (arm64) GFN if page is xenheap page,
+ * or a page stolen during memory exchange.
*/
#define PGT_gfn_width PG_shift(3)
#define PGT_gfn_mask ((1UL<<PGT_gfn_width)-1)
-#define PGT_INVALID_XENHEAP_GFN _gfn(PGT_gfn_mask)
+#define PGT_INVALID_GFN _gfn(PGT_gfn_mask)
+#define PGT_INVALID_XENHEAP_GFN PGT_INVALID_GFN
/*
* An arch-specific initialization pattern is needed for the type_info field
- * as it's GFN portion can contain the valid GFN if page is xenheap page.
+ * as its GFN portion can contain a valid GFN if the page is a xenheap page,
+ * or one stolen during memory exchange.
*/
-#define PGT_TYPE_INFO_INITIALIZER gfn_x(PGT_INVALID_XENHEAP_GFN)
+#define PGT_TYPE_INFO_INITIALIZER gfn_x(PGT_INVALID_GFN)
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated PG_shift(1)
@@ -338,28 +341,38 @@ unsigned int arch_get_dma_bitsize(void);
* that requirement (risk of deadlock, lock inversion, etc) it is important
* to make sure that all non-protected updates to this field are atomic.
*/
-static inline gfn_t page_get_xenheap_gfn(const struct page_info *p)
+static inline gfn_t page_get_gfn(const struct page_info *p)
{
gfn_t gfn_ = _gfn(ACCESS_ONCE(p->u.inuse.type_info) & PGT_gfn_mask);
- ASSERT(is_xen_heap_page(p));
-
- return gfn_eq(gfn_, PGT_INVALID_XENHEAP_GFN) ? INVALID_GFN : gfn_;
+ return gfn_eq(gfn_, PGT_INVALID_GFN) ? INVALID_GFN : gfn_;
}
-static inline void page_set_xenheap_gfn(struct page_info *p, gfn_t gfn)
+static inline void page_set_gfn(struct page_info *p, gfn_t gfn)
{
- gfn_t gfn_ = gfn_eq(gfn, INVALID_GFN) ? PGT_INVALID_XENHEAP_GFN : gfn;
+ gfn_t gfn_ = gfn_eq(gfn, INVALID_GFN) ? PGT_INVALID_GFN : gfn;
unsigned long x, nx, y = p->u.inuse.type_info;
- ASSERT(is_xen_heap_page(p));
-
do {
x = y;
nx = (x & ~PGT_gfn_mask) | gfn_x(gfn_);
} while ( (y = cmpxchg(&p->u.inuse.type_info, x, nx)) != x );
}
+static inline gfn_t page_get_xenheap_gfn(const struct page_info *p)
+{
+ ASSERT(is_xen_heap_page(p));
+
+ return page_get_gfn(p);
+}
+
+static inline void page_set_xenheap_gfn(struct page_info *p, gfn_t gfn)
+{
+ ASSERT(is_xen_heap_page(p));
+
+ page_set_gfn(p, gfn);
+}
+
#endif /* __ARCH_ARM_MM__ */
/*
* Local variables:
--
2.43.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |