[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v3 10/12] xen/riscv: introduce p2m_gpa_bits



common/device-tree/domain-build.c uses p2m_gpa_bits to determine the
upper bound of the GPA space when searching for unused regions, so it
must be defined when CONFIG_DOMAIN_BUILD_HELPERS=y.

The variable is initialised to PADDR_BITS and narrowed in p2m_init() to
the GPA width of the selected G-stage mode, allowing an external entity
(e.g. an IOMMU) to restrict it further if needed.

p2m_gpa_bits is a global rather than a per-domain value, which is
acceptable for now because all domains are required to use the same
G-stage MMU mode, as the dom0less common code uses a single value
shared by all domains.

Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
---
Changes in v3:
 - Update initialization of p2m_gpa_bits and the comment above.
 - Rework how p2m_gpa_bits is limited.
 - Update the commit message.
---
Changes in v2:
 - New patch.
---
 xen/arch/riscv/include/asm/p2m.h | 10 ++++++++--
 xen/arch/riscv/p2m.c             | 17 ++++++++++++++++-
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/xen/arch/riscv/include/asm/p2m.h b/xen/arch/riscv/include/asm/p2m.h
index b5b6a996baeb..0d1dace1a0d8 100644
--- a/xen/arch/riscv/include/asm/p2m.h
+++ b/xen/arch/riscv/include/asm/p2m.h
@@ -32,10 +32,13 @@
  */
 #define P2M_LEVEL_ORDER(lvl) XEN_PT_LEVEL_ORDER(lvl)
 
-#define P2M_ROOT_EXTRA_BITS(p2m, lvl) (2 * ((lvl) == P2M_ROOT_LEVEL(p2m)))
+#define P2M_ROOT_EXTRA_BITS 2
+
+#define P2M_LEVEL_EXTRA_BITS(p2m, lvl) \
+    (P2M_ROOT_EXTRA_BITS * ((lvl) == P2M_ROOT_LEVEL(p2m)))
 
 #define P2M_PAGETABLE_ENTRIES(p2m, lvl) \
-    (BIT(PAGETABLE_ORDER + P2M_ROOT_EXTRA_BITS(p2m, lvl), UL))
+    (BIT(PAGETABLE_ORDER + P2M_LEVEL_EXTRA_BITS(p2m, lvl), UL))
 
 #define P2M_TABLE_OFFSET(p2m, lvl) (P2M_PAGETABLE_ENTRIES(p2m, lvl) - 1UL)
 
@@ -44,6 +47,9 @@
 #define P2M_LEVEL_MASK(p2m, lvl) \
     (P2M_TABLE_OFFSET(p2m, lvl) << P2M_GFN_LEVEL_SHIFT(lvl))
 
+/* Holds the bit size of GPAs in p2m tables */
+extern unsigned int p2m_gpa_bits;
+
 #define paddr_bits PADDR_BITS
 
 /* Get host p2m table */
diff --git a/xen/arch/riscv/p2m.c b/xen/arch/riscv/p2m.c
index b0773083ff53..1f229e96d740 100644
--- a/xen/arch/riscv/p2m.c
+++ b/xen/arch/riscv/p2m.c
@@ -11,6 +11,8 @@
 #include <xen/sections.h>
 #include <xen/xvmalloc.h>
 
+#include <public/domctl.h>
+
 #include <asm/cpufeature.h>
 #include <asm/csr.h>
 #include <asm/flushtlb.h>
@@ -66,6 +68,12 @@ static const struct gstage_mode_desc modes[] = {
 
 const struct gstage_mode_desc * __ro_after_init max_gstage_mode = &modes[0];
 
+/*
+ * Set to the maximum configured support for GPA bits, so the number of GPA
+ * bits can be restricted by an external entity (e.g. IOMMU).
+ */
+unsigned int __ro_after_init p2m_gpa_bits = PADDR_BITS;
+
 static void p2m_free_page(struct p2m_domain *p2m, struct page_info *pg);
 
 static inline void p2m_free_metadata_page(struct p2m_domain *p2m,
@@ -359,6 +367,7 @@ int p2m_init(struct domain *d, const struct 
xen_domctl_createdomain *config)
      */
     static const struct gstage_mode_desc *m = &modes[0];
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    unsigned int gpa_bits;
 
     /*
      * "Trivial" initialisation is now complete.  Set the backpointer so the
@@ -408,6 +417,12 @@ int p2m_init(struct domain *d, const struct 
xen_domctl_createdomain *config)
 #   error "Add init of p2m->clean_dcache"
 #endif
 
+    gpa_bits = P2M_GFN_LEVEL_SHIFT(p2m->mode->paging_levels + 1) +
+               P2M_ROOT_EXTRA_BITS;
+
+    if ( gpa_bits < p2m_gpa_bits )
+        p2m_gpa_bits = gpa_bits;
+
     return 0;
 }
 
@@ -1345,7 +1360,7 @@ static mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t 
gfn,
 {
     unsigned int level = P2M_ROOT_LEVEL(p2m);
     unsigned int gfn_limit_bits =
-        P2M_LEVEL_ORDER(level + 1) + P2M_ROOT_EXTRA_BITS(p2m, level);
+        P2M_LEVEL_ORDER(level + 1) + P2M_LEVEL_EXTRA_BITS(p2m, level);
     pte_t entry, *table;
     int rc;
     mfn_t mfn = INVALID_MFN;
-- 
2.53.0




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.