[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH 1/8] xen/arm: Implement p2m_set_entry for MPU systems



From: Penny Zheng <Penny.Zheng@xxxxxxx>

Implement the function `p2m_set_entry`, which is responsible for
inserting a new entry into the p2m tables, for MPU systems.

Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
Signed-off-by: Wei Chen <wei.chen@xxxxxxx>
Signed-off-by: Luca Fancellu <luca.fancellu@xxxxxxx>
Signed-off-by: Hari Limaye <hari.limaye@xxxxxxx>
Signed-off-by: Harry Ramsey <harry.ramsey@xxxxxxx>
---
 xen/arch/arm/include/asm/arm32/mpu.h |   3 +-
 xen/arch/arm/include/asm/arm64/mpu.h |   3 +-
 xen/arch/arm/include/asm/mpu/mm.h    |   3 +-
 xen/arch/arm/include/asm/mpu/p2m.h   |  10 +++
 xen/arch/arm/include/asm/p2m.h       |   3 +
 xen/arch/arm/mpu/mm.c                |  43 ++++++----
 xen/arch/arm/mpu/p2m.c               | 123 ++++++++++++++++++++++++++-
 7 files changed, 167 insertions(+), 21 deletions(-)

diff --git a/xen/arch/arm/include/asm/arm32/mpu.h 
b/xen/arch/arm/include/asm/arm32/mpu.h
index d565230f84ee..ab58df079920 100644
--- a/xen/arch/arm/include/asm/arm32/mpu.h
+++ b/xen/arch/arm/include/asm/arm32/mpu.h
@@ -42,7 +42,8 @@ typedef struct {
     prbar_t prbar;
     prlar_t prlar;
     uint8_t refcount;
-    uint8_t pad[7];     /* Pad structure to 16 Bytes */
+    uint8_t p2m_type;
+    uint8_t pad[6];     /* Pad structure to 16 Bytes */
 } pr_t;
 
 #endif /* __ASSEMBLER__ */
diff --git a/xen/arch/arm/include/asm/arm64/mpu.h 
b/xen/arch/arm/include/asm/arm64/mpu.h
index 8b86a03fee44..c82624f0f2cf 100644
--- a/xen/arch/arm/include/asm/arm64/mpu.h
+++ b/xen/arch/arm/include/asm/arm64/mpu.h
@@ -41,7 +41,8 @@ typedef struct {
     prbar_t prbar;
     prlar_t prlar;
     uint8_t refcount;
-    uint8_t pad[15];    /* Pad structure to 32 Bytes */
+    uint8_t p2m_type;
+    uint8_t pad[14];    /* Pad structure to 32 Bytes */
 } pr_t;
 
 #endif /* __ASSEMBLER__ */
diff --git a/xen/arch/arm/include/asm/mpu/mm.h 
b/xen/arch/arm/include/asm/mpu/mm.h
index 1b5ffa5b644d..24bffdee4fb6 100644
--- a/xen/arch/arm/include/asm/mpu/mm.h
+++ b/xen/arch/arm/include/asm/mpu/mm.h
@@ -75,9 +75,10 @@ void write_protection_region(const pr_t *pr_write, uint8_t 
sel);
  * @param base      Base address of the range to map (inclusive).
  * @param limit     Limit address of the range to map (exclusive).
  * @param flags     Flags for the memory range to map.
+ * @param p2m       True for a stage 2 mapping, otherwise False.
  * @return          0 on success, negative on error.
  */
-int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags);
+int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags, bool p2m);
 
 /*
  * Creates a pr_t structure describing a protection region.
diff --git a/xen/arch/arm/include/asm/mpu/p2m.h 
b/xen/arch/arm/include/asm/mpu/p2m.h
index 39fc0c944916..b9c7be2d9dcc 100644
--- a/xen/arch/arm/include/asm/mpu/p2m.h
+++ b/xen/arch/arm/include/asm/mpu/p2m.h
@@ -21,6 +21,16 @@ static inline void p2m_clear_root_pages(struct p2m_domain 
*p2m) {}
 
 static inline void p2m_tlb_flush_sync(struct p2m_domain *p2m) {}
 
+static inline void region_set_p2m(pr_t *pr, p2m_type_t p2m_type)
+{
+    pr->p2m_type = p2m_type;
+}
+
+static inline p2m_type_t region_get_p2m(pr_t *pr)
+{
+    return pr->p2m_type;
+}
+
 #endif /* __ARM_MPU_P2M_H__ */
 
 /*
diff --git a/xen/arch/arm/include/asm/p2m.h b/xen/arch/arm/include/asm/p2m.h
index ed1b6dd40f40..43b383885da0 100644
--- a/xen/arch/arm/include/asm/p2m.h
+++ b/xen/arch/arm/include/asm/p2m.h
@@ -54,6 +54,9 @@ struct p2m_domain {
 #else
     /* Current Virtualization System Control Register for the p2m */
     register_t vsctlr;
+
+    /* Number of MPU memory regions in P2M MPU memory mapping table. */
+    uint8_t nr_regions;
 #endif
 
     /* Highest guest frame that's ever been mapped in the p2m */
diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
index aff88bd3a9c1..4ee58ded5ad6 100644
--- a/xen/arch/arm/mpu/mm.c
+++ b/xen/arch/arm/mpu/mm.c
@@ -317,13 +317,14 @@ static int xen_mpumap_free_entry(uint8_t idx, int 
region_found_type)
  * Update the entry in the MPU memory region mapping table (xen_mpumap) for the
  * given memory range and flags, creating one if none exists.
  *
- * @param base  Base address (inclusive).
- * @param limit Limit address (exclusive).
- * @param flags Region attributes (a combination of PAGE_HYPERVISOR_XXX)
+ * @param base      Base address (inclusive).
+ * @param limit     Limit address (exclusive).
+ * @param flags     Region attributes (a combination of PAGE_HYPERVISOR_XXX)
+ * @param p2m       True for a stage 2 mapping, otherwise False.
  * @return      0 on success, otherwise negative on error.
  */
 static int xen_mpumap_update_entry(paddr_t base, paddr_t limit,
-                                   unsigned int flags)
+                                   unsigned int flags, bool p2m)
 {
     bool flags_has_page_present;
     uint8_t idx;
@@ -399,6 +400,8 @@ static int xen_mpumap_update_entry(paddr_t base, paddr_t 
limit,
             return -ENOENT;
 
         xen_mpumap[idx] = pr_of_addr(base, limit, flags);
+        /* AP[0] always 1 for stage 2 */
+        xen_mpumap[idx].prbar.reg.ap_0 = (p2m ? 1 : 0);
 
         write_protection_region(&xen_mpumap[idx], idx);
     }
@@ -418,33 +421,41 @@ static int xen_mpumap_update_entry(paddr_t base, paddr_t 
limit,
     return 0;
 }
 
-int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags)
+static bool check_mpu_mapping(paddr_t base, paddr_t limit, unsigned int flags)
 {
-    int rc;
-
     if ( flags_has_rwx(flags) )
     {
         printk("Mappings should not be both Writeable and Executable\n");
-        return -EINVAL;
+        return false;
     }
 
     if ( base >= limit )
     {
         printk("Base address %#"PRIpaddr" must be smaller than limit address 
%#"PRIpaddr"\n",
                base, limit);
-        return -EINVAL;
+        return false;
     }
 
     if ( !IS_ALIGNED(base, PAGE_SIZE) || !IS_ALIGNED(limit, PAGE_SIZE) )
     {
         printk("base address %#"PRIpaddr", or limit address %#"PRIpaddr" is 
not page aligned\n",
                base, limit);
-        return -EINVAL;
+        return false;
     }
 
+    return true;
+}
+
+int xen_mpumap_update(paddr_t base, paddr_t limit, unsigned int flags, bool p2m)
+{
+    int rc;
+
+    if ( !check_mpu_mapping(base, limit, flags) )
+        return -EINVAL;
+
     spin_lock(&xen_mpumap_lock);
 
-    rc = xen_mpumap_update_entry(base, limit, flags);
+    rc = xen_mpumap_update_entry(base, limit, flags, p2m);
     if ( !rc )
         context_sync_mpu();
 
@@ -459,7 +470,7 @@ int destroy_xen_mappings(unsigned long s, unsigned long e)
     ASSERT(IS_ALIGNED(e, PAGE_SIZE));
     ASSERT(s < e);
 
-    return xen_mpumap_update(s, e, 0);
+    return xen_mpumap_update(s, e, 0, false);
 }
 
 int destroy_xen_mapping_containing(paddr_t s)
@@ -499,7 +510,7 @@ int map_pages_to_xen(unsigned long virt, mfn_t mfn, 
unsigned long nr_mfns,
                      unsigned int flags)
 {
     /* MPU systems have no translation, ma == va, so pass virt directly */
-    return xen_mpumap_update(virt, mfn_to_maddr(mfn_add(mfn, nr_mfns)), flags);
+    return xen_mpumap_update(virt, mfn_to_maddr(mfn_add(mfn, nr_mfns)), flags, 
false);
 }
 
 /*
@@ -520,7 +531,7 @@ void __init setup_mm_helper(void)
             paddr_t bank_end = bank_start + bank_size;
 
             /* Map static heap with one MPU protection region */
-            if ( xen_mpumap_update(bank_start, bank_end, PAGE_HYPERVISOR) )
+            if ( xen_mpumap_update(bank_start, bank_end, PAGE_HYPERVISOR, 
false) )
                 panic("Failed to map static heap\n");
 
             break;
@@ -533,7 +544,7 @@ void __init setup_mm_helper(void)
 
 int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
 {
-    return xen_mpumap_update(s, e, nf);
+    return xen_mpumap_update(s, e, nf, false);
 }
 
 void dump_hyp_walk(vaddr_t addr)
@@ -598,7 +609,7 @@ void __iomem *ioremap_attr(paddr_t start, size_t len, 
unsigned int flags)
     paddr_t start_pg = round_pgdown(start);
     paddr_t end_pg = round_pgup(start + len);
 
-    if ( xen_mpumap_update(start_pg, end_pg, flags) )
+    if ( xen_mpumap_update(start_pg, end_pg, flags, false) )
         return NULL;
 
     /* Mapped or already mapped */
diff --git a/xen/arch/arm/mpu/p2m.c b/xen/arch/arm/mpu/p2m.c
index ec8f630acd90..4a8595b1b25e 100644
--- a/xen/arch/arm/mpu/p2m.c
+++ b/xen/arch/arm/mpu/p2m.c
@@ -8,12 +8,131 @@
 #include <xen/sched.h>
 #include <xen/types.h>
 #include <asm/p2m.h>
+#include <asm/mpu.h>
+
+static inline unsigned int build_p2m_flags(p2m_type_t t)
+{
+    unsigned int flags = 0;
+
+    BUILD_BUG_ON(p2m_max_real_type > (1 << 4));
+
+    switch ( t )
+    {
+    case p2m_ram_rw:
+        /* Nothing to do, XN=0, RO=0 */
+        break;
+
+    case p2m_ram_ro:
+        flags |= _PAGE_RO;
+        break;
+
+    case p2m_invalid:
+        flags |= _PAGE_XN | _PAGE_RO;
+        break;
+
+    case p2m_max_real_type:
+        BUG();
+        break;
+
+    case p2m_mmio_direct_dev:
+    case p2m_mmio_direct_nc:
+    case p2m_mmio_direct_c:
+    case p2m_iommu_map_ro:
+    case p2m_iommu_map_rw:
+    case p2m_map_foreign_ro:
+    case p2m_map_foreign_rw:
+    case p2m_grant_map_ro:
+    case p2m_grant_map_rw:
+        /*
+         * NOTE(review): panic() brings down the whole host; some of these
+         * types can be requested via guest-initiated operations -- consider
+         * returning an error to the caller instead.
+         */
+        panic(XENLOG_G_ERR "p2m: UNIMPLEMENTED p2m permission in MPU system\n");
+        break;
+    }
+
+    flags |= MT_NORMAL;
+
+    return flags;
+}
+
+/*
+ * Check whether guest memory region [`sgfn`, `sgfn` + `nr_gfns`) is mapped in
+ * mpumap `table`.
+ *
+ * If the memory region is mapped, `idx` is set to the index of the associated
+ * MPU memory region and 0 is returned.
+ * If the memory region is not mapped, -ENOENT is returned.
+ * If the memory region only partially overlaps existing MPU memory regions,
+ * -EINVAL is returned.
+ */
+static int is_gfns_mapped(pr_t *table, uint8_t nr_regions, gfn_t sgfn,
+                          unsigned long nr_gfns, uint8_t *idx)
+{
+    paddr_t gbase = gfn_to_gaddr(sgfn);
+    paddr_t glimit = gfn_to_gaddr(gfn_add(sgfn, nr_gfns));
+    int rc;
+
+    rc = mpumap_contains_region(table, nr_regions, gbase, glimit, idx);
+    if ( MPUMAP_REGION_OVERLAP == rc )
+        return -EINVAL;
+
+    if ( MPUMAP_REGION_NOTFOUND == rc )
+        return -ENOENT;
+
+    return 0;
+}
+
+static int __p2m_set_entry(struct p2m_domain *p2m, gfn_t sgfn, unsigned long nr,
+                           mfn_t smfn, p2m_type_t t, p2m_access_t a)
+{
+    pr_t *table;
+    mfn_t emfn = mfn_add(smfn, nr);
+    unsigned int flags;
+    uint8_t idx = INVALID_REGION_IDX;
+
+    /*
+     * In all cases other than when removing a mapping (smfn == INVALID_MFN),
+     * gfn == mfn on MPU systems.
+     */
+    if ( !mfn_eq(smfn, INVALID_MFN) && gfn_x(sgfn) != mfn_x(smfn) )
+    {
+        printk(XENLOG_G_ERR "Unable to map MFN %#"PRI_mfn" at %#"PRI_mfn"\n",
+               mfn_x(smfn), gfn_x(sgfn));
+        return -EINVAL;
+    }
+
+    table = (pr_t *)page_to_virt(p2m->root);
+    if ( !table )
+        return -EINVAL;
+
+    /* Already mapped */
+    if ( is_gfns_mapped(table, p2m->nr_regions, sgfn, nr, &idx) != -ENOENT )
+    {
+        printk(XENLOG_G_ERR "Unable to insert P2M MPU memory region %#"PRIpaddr"-%#"PRIpaddr"\n",
+               gfn_to_gaddr(sgfn), gfn_to_gaddr(gfn_add(sgfn, nr)));
+        return -EINVAL;
+    }
+
+    flags = build_p2m_flags(t);
+    /*
+     * NOTE(review): table[] is indexed by p2m->nr_regions with no check
+     * against the capacity of the region table backing p2m->root -- confirm
+     * an upper bound is enforced before this point.
+     */
+    table[p2m->nr_regions] = pr_of_addr(mfn_to_maddr(smfn),
+                                        mfn_to_maddr(mfn_add(smfn, nr)),
+                                        flags);
+    region_set_p2m(&table[p2m->nr_regions], t);
+    p2m->nr_regions++;
+
+    p2m->max_mapped_gfn = gfn_max(p2m->max_mapped_gfn, _gfn(mfn_x(emfn)));
+    p2m->lowest_mapped_gfn = gfn_min(p2m->lowest_mapped_gfn, _gfn(mfn_x(smfn)));
+
+    return 0;
+}
 
 int p2m_set_entry(struct p2m_domain *p2m, gfn_t sgfn, unsigned long nr,
                   mfn_t smfn, p2m_type_t t, p2m_access_t a)
 {
-    BUG_ON("unimplemented");
-    return -EINVAL;
+    /*
+     * Any reference taken by the P2M mappings (e.g. foreign mapping) will
+     * be dropped in relinquish_p2m_mapping(). As the P2M will still
+     * be accessible after, we need to prevent mapping to be added when the
+     * domain is dying.
+     */
+    if ( unlikely(p2m->domain->is_dying) )
+        return -ENOMEM;
+
+    return __p2m_set_entry(p2m, sgfn, nr, smfn, t, a);
 }
 
 mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn, p2m_type_t *t,
-- 
2.34.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.