
[Xen-devel] [PATCH RFC 5/7] xen/arm: Data abort exception (R/W) mem_events.



This patch replaces the stub p2m functions with the actual ones required to
store, set, check and deliver LPAE R/W mem_events. As the LPAE PTE lacks
enough available bits to hold the p2m_access_t value, we use a separate
radix tree (mem_access_settings) to store the permissions, keyed by gfn.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
---
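Note for reviewers (not part of the commit message): the sketch below shows
how an access setting round-trips through the radix tree via Xen's
radix_tree_int_to_ptr()/radix_tree_ptr_to_int() helpers -- the same pattern
p2m_set_mem_access()/p2m_get_mem_access() use in this patch. The example_*
names are hypothetical and exist only for illustration.

    /* Illustrative sketch only -- not part of this patch. In the real code
     * the caller holds the p2m lock around these operations. */
    #include <xen/radix-tree.h>

    static struct radix_tree_root example_settings; /* radix_tree_init() first */

    static int example_set(unsigned long gfn, p2m_access_t a)
    {
        /* p2m_access_t values are small integers, so they are packed
         * directly into the stored pointer. */
        int rc = radix_tree_insert(&example_settings, gfn,
                                   radix_tree_int_to_ptr(a));

        if ( rc == -EEXIST )
        {
            /* An entry already exists for this gfn: overwrite it in place. */
            radix_tree_replace_slot(
                radix_tree_lookup_slot(&example_settings, gfn),
                radix_tree_int_to_ptr(a));
            rc = 0;
        }

        return rc;
    }

    static p2m_access_t example_get(unsigned long gfn, p2m_access_t def)
    {
        void *ptr = radix_tree_lookup(&example_settings, gfn);

        /* No entry stored for this gfn: fall back to the default access. */
        return ptr ? (p2m_access_t)radix_tree_ptr_to_int(ptr) : def;
    }
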
 xen/arch/arm/p2m.c        | 407 +++++++++++++++++++++++++++++++++++++++-------
 xen/arch/arm/traps.c      |  25 ++-
 xen/include/asm-arm/p2m.h |  47 +++---
 3 files changed, 390 insertions(+), 89 deletions(-)
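
Also illustrative: the violation check in p2m_mem_access_check() below is an
explicit switch; an equivalent way to read it is as a table of permitted
R/W/X masks per xenmem_access_t. The EX_* macros and example_is_violation()
are made up for this sketch and are not code from the patch.

    /* Equivalent to the switch in p2m_mem_access_check() for the eight
     * plain XENMEM_access_* values it handles. Illustrative only. */
    #include <public/memory.h>

    #define EX_R 1
    #define EX_W 2
    #define EX_X 4

    static const unsigned char example_allowed[] = {
        [XENMEM_access_n]   = 0,
        [XENMEM_access_r]   = EX_R,
        [XENMEM_access_w]   = EX_W,
        [XENMEM_access_rw]  = EX_R | EX_W,
        [XENMEM_access_x]   = EX_X,
        [XENMEM_access_rx]  = EX_R | EX_X,
        [XENMEM_access_wx]  = EX_W | EX_X,
        [XENMEM_access_rwx] = EX_R | EX_W | EX_X,
    };

    static bool_t example_is_violation(xenmem_access_t xma,
                                       bool_t r, bool_t w, bool_t x)
    {
        unsigned char req = (r ? EX_R : 0) | (w ? EX_W : 0) | (x ? EX_X : 0);

        /* Any requested access bit outside the permitted mask is a violation. */
        return (req & ~example_allowed[xma]) != 0;
    }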

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 0ca0d2f..41cfe99 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -10,6 +10,7 @@
 #include <asm/event.h>
 #include <asm/hardirq.h>
 #include <asm/page.h>
+#include <xen/radix-tree.h>
 #include <xen/mem_event.h>
 #include <public/mem_event.h>
 #include <xen/mem_access.h>
@@ -148,13 +149,89 @@ static lpae_t *p2m_map_first(struct p2m_domain *p2m, paddr_t addr)
     return __map_domain_page(page);
 }
 
+static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
+{
+    /* First apply type permissions */
+    switch ( t )
+    {
+    case p2m_ram_rw:
+        e->p2m.xn = 0;
+        e->p2m.write = 1;
+        break;
+
+    case p2m_ram_ro:
+        e->p2m.xn = 0;
+        e->p2m.write = 0;
+        break;
+
+    case p2m_iommu_map_rw:
+    case p2m_map_foreign:
+    case p2m_grant_map_rw:
+    case p2m_mmio_direct:
+        e->p2m.xn = 1;
+        e->p2m.write = 1;
+        break;
+
+    case p2m_iommu_map_ro:
+    case p2m_grant_map_ro:
+    case p2m_invalid:
+        e->p2m.xn = 1;
+        e->p2m.write = 0;
+        break;
+
+    case p2m_max_real_type:
+        BUG();
+        break;
+    }
+
+    /* Then restrict with access permissions */
+    switch ( a )
+    {
+    case p2m_access_n:
+        e->p2m.read = e->p2m.write = 0;
+        e->p2m.xn = 1;
+        break;
+    case p2m_access_r:
+        e->p2m.write = 0;
+        e->p2m.xn = 1;
+        break;
+    case p2m_access_x:
+    case p2m_access_rx:
+        e->p2m.write = e->p2m.xn = 0;
+        break;
+    case p2m_access_w:
+    case p2m_access_rw:
+        e->p2m.write = 1;
+        e->p2m.xn = 1;
+        break;
+    case p2m_access_wx:
+    case p2m_access_rwx:
+        break;
+    }
+}
+
+static inline void p2m_write_pte(lpae_t *p, lpae_t pte, bool_t flush_cache)
+{
+    write_pte(p, pte);
+    if ( flush_cache )
+        clean_xen_dcache(*p);
+}
+
 /*
  * Lookup the MFN corresponding to a domain's PFN.
  *
  * There are no processor functions to do a stage 2 only lookup therefore we
 * do a software walk.
+ *
+ * [IN]:  d      Domain
+ * [IN]:  paddr  IPA
+ * [IN]:  a      (Optional) Update PTE access permission
+ * [OUT]: t      (Optional) Return PTE type
  */
-paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
+paddr_t p2m_lookup(struct domain *d,
+                   paddr_t paddr,
+                   p2m_access_t *a,
+                   p2m_type_t *t)
 {
     struct p2m_domain *p2m = &d->arch.p2m;
     lpae_t pte, *first = NULL, *second = NULL, *third = NULL;
@@ -167,8 +244,6 @@ paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
 
     *t = p2m_invalid;
 
-    spin_lock(&p2m->lock);
-
     first = p2m_map_first(p2m, paddr);
     if ( !first )
         goto err;
@@ -200,6 +275,14 @@ done:
     {
         ASSERT(pte.p2m.type != p2m_invalid);
         maddr = (pte.bits & PADDR_MASK & mask) | (paddr & ~mask);
+        ASSERT(mfn_valid(maddr >> PAGE_SHIFT));
+
+        if ( a )
+        {
+            p2m_set_permission(&pte, pte.p2m.type, *a);
+            p2m_write_pte(&pte, pte, 1);
+        }
+
         *t = pte.p2m.type;
     }
 
@@ -208,8 +291,6 @@ done:
     if (first) unmap_domain_page(first);
 
 err:
-    spin_unlock(&p2m->lock);
-
     return maddr;
 }
 
@@ -228,7 +309,7 @@ int p2m_pod_decrease_reservation(struct domain *d,
 }
 
 static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
-                               p2m_type_t t)
+                               p2m_type_t t, p2m_access_t a)
 {
     paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
     /* sh, xn and write bit will be defined in the following switches
@@ -258,37 +339,7 @@ static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
         break;
     }
 
-    switch (t)
-    {
-    case p2m_ram_rw:
-        e.p2m.xn = 0;
-        e.p2m.write = 1;
-        break;
-
-    case p2m_ram_ro:
-        e.p2m.xn = 0;
-        e.p2m.write = 0;
-        break;
-
-    case p2m_iommu_map_rw:
-    case p2m_map_foreign:
-    case p2m_grant_map_rw:
-    case p2m_mmio_direct:
-        e.p2m.xn = 1;
-        e.p2m.write = 1;
-        break;
-
-    case p2m_iommu_map_ro:
-    case p2m_grant_map_ro:
-    case p2m_invalid:
-        e.p2m.xn = 1;
-        e.p2m.write = 0;
-        break;
-
-    case p2m_max_real_type:
-        BUG();
-        break;
-    }
+    p2m_set_permission(&e, t, a);
 
     ASSERT(!(pa & ~PAGE_MASK));
     ASSERT(!(pa & ~PADDR_MASK));
@@ -298,13 +349,6 @@ static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
     return e;
 }
 
-static inline void p2m_write_pte(lpae_t *p, lpae_t pte, bool_t flush_cache)
-{
-    write_pte(p, pte);
-    if ( flush_cache )
-        clean_xen_dcache(*p);
-}
-
 /*
  * Allocate a new page table page and hook it in via the given entry.
  * apply_one_level relies on this returning 0 on success
@@ -346,7 +390,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
          for ( i=0 ; i < LPAE_ENTRIES; i++ )
          {
              pte = mfn_to_p2m_entry(base_pfn + (i<<(level_shift-LPAE_SHIFT)),
-                                    MATTR_MEM, t);
+                                    MATTR_MEM, t, p2m->default_access);
 
              /*
               * First and second level super pages set p2m.table = 0, but
@@ -366,7 +410,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
 
     unmap_domain_page(p);
 
-    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid);
+    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid, p2m->default_access);
 
     p2m_write_pte(entry, pte, flush_cache);
 
@@ -461,7 +505,8 @@ static int apply_one_level(struct domain *d,
                            paddr_t *maddr,
                            bool_t *flush,
                            int mattr,
-                           p2m_type_t t)
+                           p2m_type_t t,
+                           p2m_access_t a)
 {
     /* Helpers to lookup the properties of each level */
     const paddr_t level_sizes[] =
@@ -497,7 +542,7 @@ static int apply_one_level(struct domain *d,
             page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
             if ( page )
             {
-                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t);
+                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
                 if ( level < 3 )
                     pte.p2m.table = 0;
                 p2m_write_pte(entry, pte, flush_cache);
@@ -532,7 +577,7 @@ static int apply_one_level(struct domain *d,
              (level == 3 || !p2m_table(orig_pte)) )
         {
             /* New mapping is superpage aligned, make it */
-            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t);
+            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
             if ( level < 3 )
                 pte.p2m.table = 0; /* Superpage entry */
 
@@ -639,6 +684,7 @@ static int apply_one_level(struct domain *d,
 
         memset(&pte, 0x00, sizeof(pte));
         p2m_write_pte(entry, pte, flush_cache);
+        radix_tree_delete(&p2m->mem_access_settings, paddr_to_pfn(*addr));
 
         *addr += level_size;
 
@@ -693,7 +739,8 @@ static int apply_p2m_changes(struct domain *d,
                      paddr_t end_gpaddr,
                      paddr_t maddr,
                      int mattr,
-                     p2m_type_t t)
+                     p2m_type_t t,
+                     p2m_access_t a)
 {
     int rc, ret;
     struct p2m_domain *p2m = &d->arch.p2m;
@@ -758,7 +805,7 @@ static int apply_p2m_changes(struct domain *d,
                               1, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
-                              mattr, t);
+                              mattr, t, a);
         if ( ret < 0 ) { rc = ret ; goto out; }
         count += ret;
         if ( ret != P2M_ONE_DESCEND ) continue;
@@ -779,7 +826,7 @@ static int apply_p2m_changes(struct domain *d,
                               2, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
-                              mattr, t);
+                              mattr, t, a);
         if ( ret < 0 ) { rc = ret ; goto out; }
         count += ret;
         if ( ret != P2M_ONE_DESCEND ) continue;
@@ -798,7 +845,7 @@ static int apply_p2m_changes(struct domain *d,
                               3, flush_pt, op,
                               start_gpaddr, end_gpaddr,
                               &addr, &maddr, &flush,
-                              mattr, t);
+                              mattr, t, a);
         if ( ret < 0 ) { rc = ret ; goto out; }
         /* L3 had better have done something! We cannot descend any further */
         BUG_ON(ret == P2M_ONE_DESCEND);
@@ -840,7 +887,8 @@ int p2m_populate_ram(struct domain *d,
                      paddr_t end)
 {
     return apply_p2m_changes(d, ALLOCATE, start, end,
-                             0, MATTR_MEM, p2m_ram_rw);
+                             0, MATTR_MEM, p2m_ram_rw,
+                             d->arch.p2m.default_access);
 }
 
 int map_mmio_regions(struct domain *d,
@@ -852,7 +900,9 @@ int map_mmio_regions(struct domain *d,
                              pfn_to_paddr(start_gfn),
                              pfn_to_paddr(start_gfn + nr_mfns),
                              pfn_to_paddr(mfn),
-                             MATTR_DEV, p2m_mmio_direct);
+                             MATTR_DEV,
+                             p2m_mmio_direct,
+                             d->arch.p2m.default_access);
 }
 
 int guest_physmap_add_entry(struct domain *d,
@@ -864,7 +914,8 @@ int guest_physmap_add_entry(struct domain *d,
     return apply_p2m_changes(d, INSERT,
                              pfn_to_paddr(gpfn),
                              pfn_to_paddr(gpfn + (1 << page_order)),
-                             pfn_to_paddr(mfn), MATTR_MEM, t);
+                             pfn_to_paddr(mfn), MATTR_MEM, t,
+                             d->arch.p2m.default_access);
 }
 
 void guest_physmap_remove_page(struct domain *d,
@@ -874,7 +925,8 @@ void guest_physmap_remove_page(struct domain *d,
     apply_p2m_changes(d, REMOVE,
                       pfn_to_paddr(gpfn),
                       pfn_to_paddr(gpfn + (1<<page_order)),
-                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid);
+                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid,
+                      d->arch.p2m.default_access);
 }
 
 int p2m_alloc_table(struct domain *d)
@@ -977,6 +1029,8 @@ void p2m_teardown(struct domain *d)
 
     p2m_free_vmid(d);
 
+    radix_tree_destroy(&p2m->mem_access_settings, NULL);
+
     spin_unlock(&p2m->lock);
 }
 
@@ -1003,6 +1057,7 @@ int p2m_init(struct domain *d)
     p2m->lowest_mapped_gfn = ULONG_MAX;
 
     p2m->default_access = p2m_access_rwx;
+    radix_tree_init(&p2m->mem_access_settings);
 
 err:
     spin_unlock(&p2m->lock);
@@ -1018,7 +1073,7 @@ int relinquish_p2m_mapping(struct domain *d)
                               pfn_to_paddr(p2m->lowest_mapped_gfn),
                               pfn_to_paddr(p2m->max_mapped_gfn),
                               pfn_to_paddr(INVALID_MFN),
-                              MATTR_MEM, p2m_invalid);
+                              MATTR_MEM, p2m_invalid, p2m->default_access);
 }
 
 int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
@@ -1032,12 +1087,16 @@ int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
                              pfn_to_paddr(start_mfn),
                              pfn_to_paddr(end_mfn),
                              pfn_to_paddr(INVALID_MFN),
-                             MATTR_MEM, p2m_invalid);
+                             MATTR_MEM,
+                             p2m_invalid, p2m->default_access);
 }
 
 unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
 {
-    paddr_t p = p2m_lookup(d, pfn_to_paddr(gpfn), NULL);
+    paddr_t p;
+    spin_lock(&d->arch.p2m.lock);
+    p = p2m_lookup(d, pfn_to_paddr(gpfn), NULL, NULL);
+    spin_unlock(&d->arch.p2m.lock);
     return p >> PAGE_SHIFT;
 }
 
@@ -1069,6 +1128,234 @@ err:
     return page;
 }
 
+int p2m_mem_access_check(paddr_t gpa, vaddr_t gla,
+                          bool_t access_r, bool_t access_w, bool_t access_x,
+                          bool_t ptw)
+{
+    struct vcpu *v = current;
+    mem_event_request_t *req = NULL;
+    xenmem_access_t xma;
+    bool_t violation;
+    int rc;
+
+    /* If we have no listener, nothing to do */
+    if ( !mem_event_check_ring(&v->domain->mem_event->access) )
+        return 1;
+
+    rc = p2m_get_mem_access(v->domain, paddr_to_pfn(gpa), &xma);
+    if ( rc )
+        return rc;
+
+    switch ( xma )
+    {
+    default:
+    case XENMEM_access_n:
+        violation = access_r || access_w || access_x;
+        break;
+    case XENMEM_access_r:
+        violation = access_w || access_x;
+        break;
+    case XENMEM_access_w:
+        violation = access_r || access_x;
+        break;
+    case XENMEM_access_x:
+        violation = access_r || access_w;
+        break;
+    case XENMEM_access_rx:
+        violation = access_w;
+        break;
+    case XENMEM_access_wx:
+        violation = access_r;
+        break;
+    case XENMEM_access_rw:
+        violation = access_x;
+        break;
+    case XENMEM_access_rwx:
+        violation = 0;
+        break;
+    }
+
+    if ( !violation )
+        return 1;
+
+    req = xzalloc(mem_event_request_t);
+    if ( req )
+    {
+        req->reason = MEM_EVENT_REASON_VIOLATION;
+        req->flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+        req->gfn = gpa >> PAGE_SHIFT;
+        req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
+        req->gla = gla;
+        req->gla_valid = 1;
+        req->access_r = access_r;
+        req->access_w = access_w;
+        req->access_x = access_x;
+        req->vcpu_id = v->vcpu_id;
+
+        mem_access_send_req(v->domain, req);
+        vcpu_pause_nosync(v);
+
+        xfree(req);
+
+        return 0;
+    }
+
+    return 1;
+}
+
+void p2m_mem_access_resume(struct domain *d)
+{
+    mem_event_response_t rsp;
+
+    /* Pull all responses off the ring */
+    while ( mem_event_get_response(d, &d->mem_event->access, &rsp) )
+    {
+        struct vcpu *v;
+
+        if ( rsp.flags & MEM_EVENT_FLAG_DUMMY )
+            continue;
+
+        /* Validate the vcpu_id in the response. */
+        if ( (rsp.vcpu_id >= d->max_vcpus) || !d->vcpu[rsp.vcpu_id] )
+            continue;
+
+        v = d->vcpu[rsp.vcpu_id];
+
+        /* Unpause domain */
+        if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+            mem_event_vcpu_unpause(v);
+    }
+}
+
+/* Set access type for a region of pfns.
+ * If pfn == -1ul, sets the default access type. */
+long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
+                        uint32_t start, uint32_t mask, xenmem_access_t access)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    p2m_access_t a;
+    long rc = 0;
+
+    static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+        ACCESS(n),
+        ACCESS(r),
+        ACCESS(w),
+        ACCESS(rw),
+        ACCESS(x),
+        ACCESS(rx),
+        ACCESS(wx),
+        ACCESS(rwx),
+#undef ACCESS
+    };
+
+    switch ( access )
+    {
+    case 0 ... ARRAY_SIZE(memaccess) - 1:
+        a = memaccess[access];
+        break;
+    case XENMEM_access_default:
+        a = p2m->default_access;
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    /* If request to set default access */
+    if ( pfn == ~0ul )
+    {
+        p2m->default_access = a;
+        return 0;
+    }
+
+    spin_lock(&p2m->lock);
+    for ( pfn += start; nr > start; ++pfn )
+    {
+        paddr_t maddr = p2m_lookup(d, pfn_to_paddr(pfn), &a, NULL);
+        unsigned long mfn = maddr >> PAGE_SHIFT;
+
+        if ( !mfn_valid(mfn) )
+            break;
+
+        rc = radix_tree_insert(&p2m->mem_access_settings, pfn,
+                                    radix_tree_int_to_ptr(a));
+
+        /* If a setting existed already, change it to the new one */
+        if ( -EEXIST == rc )
+        {
+            radix_tree_replace_slot(
+                radix_tree_lookup_slot(
+                    &p2m->mem_access_settings, pfn),
+                radix_tree_int_to_ptr(a));
+            rc = 0;
+        }
+        else if ( rc )
+        {
+            /* If we fail to save the setting in the Radix tree, we
+             * need to reset the PTE permissions to default. */
+            p2m_lookup(d, pfn_to_paddr(pfn), &p2m->default_access, NULL);
+            break;
+        }
+
+        /* Check for continuation if it's not the last iteration. */
+        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
+        {
+            rc = start;
+            break;
+        }
+    }
+    spin_unlock(&p2m->lock);
+    return rc;
+}
+
+int p2m_get_mem_access(struct domain *d, unsigned long gpfn,
+                       xenmem_access_t *access)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    void *i;
+    int index;
+
+    static const xenmem_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = XENMEM_access_##ac
+            ACCESS(n),
+            ACCESS(r),
+            ACCESS(w),
+            ACCESS(rw),
+            ACCESS(x),
+            ACCESS(rx),
+            ACCESS(wx),
+            ACCESS(rwx),
+#undef ACCESS
+    };
+
+    /* If request to get default access */
+    if ( gpfn == ~0ul )
+    {
+        *access = memaccess[p2m->default_access];
+        return 0;
+    }
+
+    spin_lock(&p2m->lock);
+
+    i = radix_tree_lookup(&p2m->mem_access_settings, gpfn);
+
+    spin_unlock(&p2m->lock);
+
+    if ( !i )
+        return -ESRCH;
+
+    index = radix_tree_ptr_to_int(i);
+
+    if ( (unsigned) index >= ARRAY_SIZE(memaccess) )
+        return -ERANGE;
+
+    *access = memaccess[index];
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 76a9586..82305c4 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1674,23 +1674,25 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
     uint32_t offset;
     uint32_t *first = NULL, *second = NULL;
 
+    spin_lock(&d->arch.p2m.lock);
+
     printk("dom%d VA 0x%08"PRIvaddr"\n", d->domain_id, addr);
     printk("    TTBCR: 0x%08"PRIregister"\n", ttbcr);
     printk("    TTBR0: 0x%016"PRIx64" = 0x%"PRIpaddr"\n",
-           ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK, NULL));
+           ttbr0, p2m_lookup(d, ttbr0 & PAGE_MASK, NULL, NULL));
 
     if ( ttbcr & TTBCR_EAE )
     {
         printk("Cannot handle LPAE guest PT walk\n");
-        return;
+        goto err;
     }
     if ( (ttbcr & TTBCR_N_MASK) != 0 )
     {
         printk("Cannot handle TTBR1 guest walks\n");
-        return;
+        goto err;
     }
 
-    paddr = p2m_lookup(d, ttbr0 & PAGE_MASK, NULL);
+    paddr = p2m_lookup(d, ttbr0 & PAGE_MASK, NULL, NULL);
     if ( paddr == INVALID_PADDR )
     {
         printk("Failed TTBR0 maddr lookup\n");
@@ -1705,7 +1707,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
          !(first[offset] & 0x2) )
         goto done;
 
-    paddr = p2m_lookup(d, first[offset] & PAGE_MASK, NULL);
+    paddr = p2m_lookup(d, first[offset] & PAGE_MASK, NULL, NULL);
 
     if ( paddr == INVALID_PADDR )
     {
@@ -1720,6 +1722,9 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
 done:
     if (second) unmap_domain_page(second);
     if (first) unmap_domain_page(first);
+
+err:
+    spin_unlock(&d->arch.p2m.lock);
 }
 
 static void do_trap_instr_abort_guest(struct cpu_user_regs *regs,
@@ -1749,9 +1754,6 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
     info.gva = READ_SYSREG64(FAR_EL2);
 #endif
 
-    if (dabt.s1ptw)
-        goto bad_data_abort;
-
     rc = gva_to_ipa(info.gva, &info.gpa);
     if ( rc == -EFAULT )
         goto bad_data_abort;
@@ -1774,12 +1776,19 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
         }
     }
 
+    rc = p2m_mem_access_check(info.gpa, info.gva,
+                              1, info.dabt.write, 0,
+                              info.dabt.s1ptw);
+
     if (handle_mmio(&info))
     {
         advance_pc(regs, hsr);
         return;
     }
 
+    if ( !rc )
+        return;
+
 bad_data_abort:
     inject_dabt_exception(regs, info.gva, hsr.len);
 }
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index f3d1f33..c0fc61d 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -2,6 +2,7 @@
 #define _XEN_P2M_H
 
 #include <xen/mm.h>
+#include <xen/radix-tree.h>
 #include <public/memory.h>
 #include <public/mem_event.h>
 
@@ -39,15 +40,14 @@ typedef enum {
  * default.
  */
 typedef enum {
-    p2m_access_n     = 0, /* No access permissions allowed */
+    p2m_access_n     = 0, /* No access allowed */
     p2m_access_r     = 1,
-    p2m_access_w     = 2, 
+    p2m_access_w     = 2,
     p2m_access_rw    = 3,
-    p2m_access_x     = 4, 
+    p2m_access_x     = 4,
     p2m_access_rx    = 5,
-    p2m_access_wx    = 6, 
-    p2m_access_rwx   = 7
-
+    p2m_access_wx    = 6,
+    p2m_access_rwx   = 7,
     /* NOTE: Assumed to be only 4 bits right now */
 } p2m_access_t;
 
@@ -90,9 +90,13 @@ struct p2m_domain {
      * retyped get this access type.  See definition of p2m_access_t. */
     p2m_access_t default_access;
 
-    /* If true, and an access fault comes in and there is no mem_event listener, 
+    /* If true, and an access fault comes in and there is no mem_event listener,
      * pause domain.  Otherwise, remove access restrictions. */
-    bool_t       access_required;
+    bool_t access_required;
+
+    /* Radix tree to store the p2m_access_t settings as the PTEs don't have
+     * enough available bits to store this information. */
+    struct radix_tree_root mem_access_settings;
 
 };
 
@@ -128,7 +132,10 @@ void p2m_restore_state(struct vcpu *n);
 void p2m_dump_info(struct domain *d);
 
 /* Look up the MFN corresponding to a domain's PFN. */
-paddr_t p2m_lookup(struct domain *d, paddr_t gpfn, p2m_type_t *t);
+paddr_t p2m_lookup(struct domain *d,
+                   paddr_t gpfn,
+                   p2m_access_t *a,
+                   p2m_type_t *t);
 
 /* Clean & invalidate caches corresponding to a region of guest address space */
 int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn);
@@ -185,7 +192,7 @@ static inline struct page_info *get_page_from_gfn(
 {
     struct page_info *page;
     p2m_type_t p2mt;
-    paddr_t maddr = p2m_lookup(d, pfn_to_paddr(gfn), &p2mt);
+    paddr_t maddr = p2m_lookup(d, pfn_to_paddr(gfn), NULL, &p2mt);
     unsigned long mfn = maddr >> PAGE_SHIFT;
 
     if (t)
@@ -234,26 +241,24 @@ static inline int get_page_and_type(struct page_info *page,
 /* get host p2m table */
 #define p2m_get_hostp2m(d)      (&((d)->arch.p2m))
 
+/* Send mem event based on the access (gla is -1ull if not available). Boolean
+ * return value indicates if trap needs to be injected into guest. */
+int p2m_mem_access_check(paddr_t gpa, vaddr_t gla,
+                         bool_t access_r, bool_t access_w, bool_t access_x,
+                         bool_t ptw);
+
 /* Resumes the running of the VCPU, restarting the last instruction */
-static inline void p2m_mem_access_resume(struct domain *d) {}
+void p2m_mem_access_resume(struct domain *d);
 
 /* Set access type for a region of pfns.
  * If start_pfn == -1ul, sets the default access type */
-static inline
 long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
-                        uint32_t start, uint32_t mask, xenmem_access_t access)
-{
-    return -ENOSYS;
-}
+                        uint32_t start, uint32_t mask, xenmem_access_t access);
 
 /* Get access type for a pfn
  * If pfn == -1ul, gets the default access type */
-static inline
 int p2m_get_mem_access(struct domain *d, unsigned long pfn,
-                       xenmem_access_t *access)
-{
-    return -ENOSYS;
-}
+                       xenmem_access_t *access);
 
 #endif /* _XEN_P2M_H */
 
-- 
2.0.1

