[Xen-devel] [PATCH for-4.5 v7 14/21] xen/arm: p2m changes for mem_access support



Add p2m_access_t to struct page_info and make the necessary changes to the
page table construction routines so that they pass the default access
information. We store the p2m_access_t in page_info because the PTE lacks
enough software-programmable bits.
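
As a rough illustration of the resulting layout, a helper could read the
stored access type back from the backing page. This is a minimal sketch
only: p2m_get_access is hypothetical and not part of this patch; it assumes
the page_info field 'a' added below and the updated p2m_lookup signature.

    /* Hypothetical sketch: resolve a guest frame to its backing page and
     * read back the p2m_access_t stored there by this patch. */
    static p2m_access_t p2m_get_access(struct domain *d, unsigned long gpfn)
    {
        paddr_t maddr = p2m_lookup(d, pfn_to_paddr(gpfn), NULL, 1);

        if ( maddr == INVALID_PADDR )
            return d->arch.p2m.default_access;

        return maddr_to_page(maddr)->a;
    }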

Signed-off-by: Tamas K Lengyel <tklengyel@xxxxxxxxxxxxx>
---
v7: - Remove radix tree init/destroy and move p2m_access_t store to page_info.
    - Add p2m_gpfn_lock/unlock functions.
    - Add a bool_t lock input to p2m_lookup and apply_p2m_changes so the
      caller can specify whether locking should be performed. This is needed
      to support mem_access_check from common code (see the sketch below the
      changelog).
v6: - Move mem_event header include to first patch that needs it.
v5: - #include grouping style-fix.
v4: - Move p2m_get_hostp2m definition here.
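
For reference, a caller that already holds the p2m lock (as mem_access_check
would) might combine the new pieces like this. This is an illustrative
sketch only, not code from the patch:

    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    p2m_type_t p2mt;
    paddr_t maddr;

    /* Illustrative only: with lock == 0, p2m_lookup assumes the caller
     * already holds p2m->lock, here taken via the new p2m_gpfn_lock(). */
    p2m_gpfn_lock(p2m, gpfn);
    maddr = p2m_lookup(d, pfn_to_paddr(gpfn), &p2mt, 0);
    /* ... inspect or adjust the mapping while the lock is held ... */
    p2m_gpfn_unlock(p2m, gpfn);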
---
 xen/arch/arm/p2m.c        | 66 +++++++++++++++++++------------
 xen/arch/arm/traps.c      |  6 +--
 xen/include/asm-arm/mm.h  | 11 +++++-
 xen/include/asm-arm/p2m.h | 98 ++++++++++++++++++++++++++++++++++-------------
 4 files changed, 125 insertions(+), 56 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index a7dcdf5..92c3bd6 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -155,7 +155,7 @@ static lpae_t *p2m_map_first(struct p2m_domain *p2m, paddr_t addr)
  * There are no processor functions to do a stage 2 only lookup therefore we
  * do a a software walk.
  */
-paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
+paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t, bool_t lock)
 {
     struct p2m_domain *p2m = &d->arch.p2m;
     lpae_t pte, *first = NULL, *second = NULL, *third = NULL;
@@ -168,7 +168,8 @@ paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
 
     *t = p2m_invalid;
 
-    spin_lock(&p2m->lock);
+    if ( lock )
+        spin_lock(&p2m->lock);
 
     first = p2m_map_first(p2m, paddr);
     if ( !first )
@@ -209,7 +210,8 @@ done:
     if (first) unmap_domain_page(first);
 
 err:
-    spin_unlock(&p2m->lock);
+    if ( lock )
+        spin_unlock(&p2m->lock);
 
     return maddr;
 }
@@ -229,7 +231,7 @@ int p2m_pod_decrease_reservation(struct domain *d,
 }
 
 static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
-                               p2m_type_t t)
+                               p2m_type_t t, p2m_access_t a)
 {
     paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
     /* sh, xn and write bit will be defined in the following switches
@@ -347,7 +349,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
          for ( i=0 ; i < LPAE_ENTRIES; i++ )
          {
              pte = mfn_to_p2m_entry(base_pfn + (i<<(level_shift-LPAE_SHIFT)),
-                                    MATTR_MEM, t);
+                                    MATTR_MEM, t, p2m->default_access);
 
              /*
               * First and second level super pages set p2m.table = 0, but
@@ -367,7 +369,8 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
 
     unmap_domain_page(p);
 
-    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid);
+    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid,
+                           p2m->default_access);
 
     p2m_write_pte(entry, pte, flush_cache);
 
@@ -470,7 +473,8 @@ static int apply_one_level(struct domain *d,
                            paddr_t *maddr,
                            bool_t *flush,
                            int mattr,
-                           p2m_type_t t)
+                           p2m_type_t t,
+                           p2m_access_t a)
 {
     const paddr_t level_size = level_sizes[level];
     const paddr_t level_mask = level_masks[level];
@@ -499,7 +503,7 @@ static int apply_one_level(struct domain *d,
             page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
             if ( page )
             {
-                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t);
+                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
                 if ( level < 3 )
                     pte.p2m.table = 0;
                 p2m_write_pte(entry, pte, flush_cache);
@@ -534,7 +538,7 @@ static int apply_one_level(struct domain *d,
              (level == 3 || !p2m_table(orig_pte)) )
         {
             /* New mapping is superpage aligned, make it */
-            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t);
+            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
             if ( level < 3 )
                 pte.p2m.table = 0; /* Superpage entry */
 
@@ -713,7 +717,10 @@ static int apply_p2m_changes(struct domain *d,
                      paddr_t end_gpaddr,
                      paddr_t maddr,
                      int mattr,
-                     p2m_type_t t)
+                     bool_t lock,
+                     uint32_t mask,
+                     p2m_type_t t,
+                     p2m_access_t a)
 {
     int rc, ret;
     struct p2m_domain *p2m = &d->arch.p2m;
@@ -733,7 +740,8 @@ static int apply_p2m_changes(struct domain *d,
      */
     flush_pt = iommu_enabled && !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK);
 
-    spin_lock(&p2m->lock);
+    if ( lock )
+        spin_lock(&p2m->lock);
 
     addr = start_gpaddr;
     while ( addr < end_gpaddr )
@@ -780,7 +788,7 @@ static int apply_p2m_changes(struct domain *d,
                               level, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
-                              mattr, t);
+                              mattr, t, a);
         if ( ret < 0 ) { rc = ret ; goto out; }
         count += ret;
         if ( ret != P2M_ONE_DESCEND ) continue;
@@ -802,7 +810,7 @@ static int apply_p2m_changes(struct domain *d,
                               level, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
-                              mattr, t);
+                              mattr, t, a);
         if ( ret < 0 ) { rc = ret ; goto out; }
         count += ret;
         if ( ret != P2M_ONE_DESCEND ) continue;
@@ -822,7 +830,7 @@ static int apply_p2m_changes(struct domain *d,
                               level, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
-                              mattr, t);
+                              mattr, t, a);
         if ( ret < 0 ) { rc = ret ; goto out; }
         /* L3 had better have done something! We cannot descend any further */
         BUG_ON(ret == P2M_ONE_DESCEND);
@@ -865,10 +873,11 @@ out:
          */
         apply_p2m_changes(d, REMOVE,
                           start_gpaddr, addr + level_sizes[level], orig_maddr,
-                          mattr, p2m_invalid);
+                          mattr, 1, 0, p2m_invalid, d->arch.p2m.default_access);
     }
 
-    spin_unlock(&p2m->lock);
+    if ( lock )
+        spin_unlock(&p2m->lock);
 
     return rc;
 }
@@ -878,7 +887,8 @@ int p2m_populate_ram(struct domain *d,
                      paddr_t end)
 {
     return apply_p2m_changes(d, ALLOCATE, start, end,
-                             0, MATTR_MEM, p2m_ram_rw);
+                             0, MATTR_MEM, 1, 0, p2m_ram_rw,
+                             d->arch.p2m.default_access);
 }
 
 int map_mmio_regions(struct domain *d,
@@ -890,7 +900,8 @@ int map_mmio_regions(struct domain *d,
                              pfn_to_paddr(start_gfn),
                              pfn_to_paddr(start_gfn + nr),
                              pfn_to_paddr(mfn),
-                             MATTR_DEV, p2m_mmio_direct);
+                             MATTR_DEV, 1, 0, p2m_mmio_direct,
+                             d->arch.p2m.default_access);
 }
 
 int unmap_mmio_regions(struct domain *d,
@@ -902,7 +913,8 @@ int unmap_mmio_regions(struct domain *d,
                              pfn_to_paddr(start_gfn),
                              pfn_to_paddr(start_gfn + nr),
                              pfn_to_paddr(mfn),
-                             MATTR_DEV, p2m_invalid);
+                             MATTR_DEV, 1, 0, p2m_invalid,
+                             d->arch.p2m.default_access);
 }
 
 int guest_physmap_add_entry(struct domain *d,
@@ -914,7 +926,8 @@ int guest_physmap_add_entry(struct domain *d,
     return apply_p2m_changes(d, INSERT,
                              pfn_to_paddr(gpfn),
                              pfn_to_paddr(gpfn + (1 << page_order)),
-                             pfn_to_paddr(mfn), MATTR_MEM, t);
+                             pfn_to_paddr(mfn), MATTR_MEM, 1, 0, t,
+                             d->arch.p2m.default_access);
 }
 
 void guest_physmap_remove_page(struct domain *d,
@@ -924,7 +937,8 @@ void guest_physmap_remove_page(struct domain *d,
     apply_p2m_changes(d, REMOVE,
                       pfn_to_paddr(gpfn),
                       pfn_to_paddr(gpfn + (1<<page_order)),
-                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid);
+                      pfn_to_paddr(mfn), MATTR_MEM, 1, 0, p2m_invalid,
+                      d->arch.p2m.default_access);
 }
 
 int arch_grant_map_page_identity(struct domain *d, unsigned long frame,
@@ -1078,6 +1092,8 @@ int p2m_init(struct domain *d)
     p2m->max_mapped_gfn = 0;
     p2m->lowest_mapped_gfn = ULONG_MAX;
 
+    p2m->default_access = p2m_access_rwx;
+
 err:
     spin_unlock(&p2m->lock);
 
@@ -1092,7 +1108,8 @@ int relinquish_p2m_mapping(struct domain *d)
                               pfn_to_paddr(p2m->lowest_mapped_gfn),
                               pfn_to_paddr(p2m->max_mapped_gfn),
                               pfn_to_paddr(INVALID_MFN),
-                              MATTR_MEM, p2m_invalid);
+                              MATTR_MEM, 1, 0, p2m_invalid,
+                              d->arch.p2m.default_access);
 }
 
 int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
@@ -1106,12 +1123,13 @@ int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
                              pfn_to_paddr(start_mfn),
                              pfn_to_paddr(end_mfn),
                              pfn_to_paddr(INVALID_MFN),
-                             MATTR_MEM, p2m_invalid);
+                             MATTR_MEM, 1, 0, p2m_invalid,
+                             d->arch.p2m.default_access);
 }
 
 unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
 {
-    paddr_t p = p2m_lookup(d, pfn_to_paddr(gpfn), NULL);
+    paddr_t p = p2m_lookup(d, pfn_to_paddr(gpfn), NULL, 1);
     return p >> PAGE_SHIFT;
 }
 
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 25fa8a0..ae7458c 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1795,7 +1795,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
     printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr);
     printk("    TTBCR: 0x%08"PRIregister"\n", ttbcr);
     printk("    TTBR0: 0x%016"PRIx64" = 0x%"PRIpaddr"\n",
-           ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK, NULL));
+           ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK, NULL, 1));
 
     if ( ttbcr & TTBCR_EAE )
     {
@@ -1808,7 +1808,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
         return;
     }
 
-    paddr = p2m_lookup(d, ttbr0 & PAGE_MASK, NULL);
+    paddr = p2m_lookup(d, ttbr0 & PAGE_MASK, NULL, 1);
     if ( paddr == INVALID_PADDR )
     {
         printk("Failed TTBR0 maddr lookup\n");
@@ -1823,7 +1823,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
          !(first[offset] & 0x2) )
         goto done;
 
-    paddr = p2m_lookup(d, first[offset] & PAGE_MASK, NULL);
+    paddr = p2m_lookup(d, first[offset] & PAGE_MASK, NULL, 1);
 
     if ( paddr == INVALID_PADDR )
     {
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 7fc3b97..a47afcb 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -3,9 +3,10 @@
 
 #include <xen/config.h>
 #include <xen/kernel.h>
-#include <asm/page.h>
-#include <public/xen.h>
 #include <xen/domain_page.h>
+#include <xen/p2m-common.h>
+#include <public/xen.h>
+#include <asm/page.h>
 
 /* Align Xen to a 2 MiB boundary. */
 #define XEN_PADDR_ALIGN (1 << 21)
@@ -64,6 +65,10 @@ struct page_info
          */
         u32 tlbflush_timestamp;
     };
+
+    /* Mem access permission for this page. */
+    p2m_access_t a;
+
     u64 pad;
 };
 
@@ -348,6 +353,8 @@ static inline void put_page_and_type(struct page_info *page)
 
 void clear_and_clean_page(struct page_info *page);
 
+typedef unsigned long mfn_t;
+
 #endif /*  __ARCH_ARM_MM__ */
 /*
  * Local variables:
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index bec0c9b..0049538 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -11,6 +11,31 @@ struct domain;
 
 extern void memory_type_changed(struct domain *);
 
+/* List of possible type for each page in the p2m entry.
+ * The number of available bit per page in the pte for this purpose is 4 bits.
+ * So it's possible to only have 16 fields. If we run out of value in the
+ * future, it's possible to use higher value for pseudo-type and don't store
+ * them in the p2m entry.
+ */
+typedef enum {
+    p2m_invalid = 0,    /* Nothing mapped here */
+    p2m_ram_rw,         /* Normal read/write guest RAM */
+    p2m_ram_ro,         /* Read-only; writes are silently dropped */
+    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
+    p2m_map_foreign,    /* Ram pages from foreign domain */
+    p2m_grant_map_rw,   /* Read/write grant mapping */
+    p2m_grant_map_ro,   /* Read-only grant mapping */
+    /* The types below are only used to decide the page attribute in the P2M */
+    p2m_iommu_map_rw,   /* Read/write iommu mapping */
+    p2m_iommu_map_ro,   /* Read-only iommu mapping */
+    p2m_max_real_type,  /* Types after this won't be store in the p2m */
+} p2m_type_t;
+
+/* Look up a GFN and take a reference count on the backing page. */
+typedef unsigned int p2m_query_t;
+#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
+#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */
+
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
@@ -44,27 +69,30 @@ struct p2m_domain {
          * at each p2m tree level. */
         unsigned long shattered[4];
     } stats;
-};
 
-/* List of possible type for each page in the p2m entry.
- * The number of available bit per page in the pte for this purpose is 4 bits.
- * So it's possible to only have 16 fields. If we run out of value in the
- * future, it's possible to use higher value for pseudo-type and don't store
- * them in the p2m entry.
- */
-typedef enum {
-    p2m_invalid = 0,    /* Nothing mapped here */
-    p2m_ram_rw,         /* Normal read/write guest RAM */
-    p2m_ram_ro,         /* Read-only; writes are silently dropped */
-    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
-    p2m_map_foreign,    /* Ram pages from foreign domain */
-    p2m_grant_map_rw,   /* Read/write grant mapping */
-    p2m_grant_map_ro,   /* Read-only grant mapping */
-    /* The types below are only used to decide the page attribute in the P2M */
-    p2m_iommu_map_rw,   /* Read/write iommu mapping */
-    p2m_iommu_map_ro,   /* Read-only iommu mapping */
-    p2m_max_real_type,  /* Types after this won't be store in the p2m */
-} p2m_type_t;
+    /* Default P2M access type for each page in the domain: new pages,
+     * swapped in pages, cleared pages, and pages that are ambiguously
+     * retyped get this access type. See definition of p2m_access_t. */
+    p2m_access_t default_access;
+
+    /* If true, and an access fault comes in and there is no mem_event listener,
+     * pause domain. Otherwise, remove access restrictions. */
+    bool_t access_required;
+
+    /* Compat functions for mem_access_check. */
+    int                (*set_entry   )(struct p2m_domain *p2m,
+                                       unsigned long gfn,
+                                       mfn_t mfn, unsigned int page_order,
+                                       p2m_type_t p2mt,
+                                       p2m_access_t p2ma);
+    mfn_t              (*get_entry   )(struct p2m_domain *p2m,
+                                       unsigned long gfn,
+                                       p2m_type_t *p2mt,
+                                       p2m_access_t *p2ma,
+                                       p2m_query_t q,
+                                       unsigned int *page_order);
+
+};
 
 #define p2m_is_foreign(_t)  ((_t) == p2m_map_foreign)
 #define p2m_is_ram(_t)      ((_t) == p2m_ram_rw || (_t) == p2m_ram_ro)
@@ -98,7 +126,7 @@ void p2m_restore_state(struct vcpu *n);
 void p2m_dump_info(struct domain *d);
 
 /* Look up the MFN corresponding to a domain's PFN. */
-paddr_t p2m_lookup(struct domain *d, paddr_t gpfn, p2m_type_t *t);
+paddr_t p2m_lookup(struct domain *d, paddr_t gpfn, p2m_type_t *t, bool_t lock);
 
 /* Clean & invalidate caches corresponding to a region of guest address space */
 int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn);
@@ -138,17 +166,12 @@ p2m_pod_decrease_reservation(struct domain *d,
                              xen_pfn_t gpfn,
                              unsigned int order);
 
-/* Look up a GFN and take a reference count on the backing page. */
-typedef unsigned int p2m_query_t;
-#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
-#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */
-
 static inline struct page_info *get_page_from_gfn(
     struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
 {
     struct page_info *page;
     p2m_type_t p2mt;
-    paddr_t maddr = p2m_lookup(d, pfn_to_paddr(gfn), &p2mt);
+    paddr_t maddr = p2m_lookup(d, pfn_to_paddr(gfn), &p2mt, 1);
     unsigned long mfn = maddr >> PAGE_SHIFT;
 
     if (t)
@@ -201,6 +224,27 @@ int arch_grant_unmap_page_identity(struct domain *d, unsigned long frame);
 /* get host p2m table */
 #define p2m_get_hostp2m(d) (&((d)->arch.p2m))
 
+/* mem_event and mem_access are supported on any ARM guest */
+static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
+{
+    return 1;
+}
+
+static inline bool_t p2m_mem_event_sanity_check(struct domain *d)
+{
+    return 1;
+}
+
+static inline void p2m_gpfn_lock(struct p2m_domain *p2m, unsigned long gpfn)
+{
+    spin_lock(&p2m->lock);
+}
+
+static inline void p2m_gpfn_unlock(struct p2m_domain *p2m, unsigned long gpfn)
+{
+    spin_unlock(&p2m->lock);
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
-- 
2.1.0

