
[Xen-devel] [PATCH v2] x86/mm: Add mem access rights to NPT



This patch adds p2m mem_access support for NPT mode.

The access rights are stored in bits 56:53 of the NPT p2m table entries.
These bits are free now that the IOMMU flags are no longer written into
the p2m table [1].
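
(For illustration, here is a minimal, self-contained sketch of this
encoding.  This is not Xen code: put_pte_flags()/get_pte_flags() are
simplified local copies of the flags<->PTE mapping, ACCESS_MASK/
ACCESS_SHIFT are stand-ins for _PAGE_ACCESS_BITS and MASK_INSR/MASK_EXTR,
and the field position assumes the 56:53 layout stated above, i.e. flag
bits 16:13:

    #include <assert.h>
    #include <stdint.h>

    /* Flag-word bits 12..23 map to PTE bits 52..63. */
    static uint64_t put_pte_flags(unsigned int x)
    {
        return ((uint64_t)(x & ~0xfffu) << 40) | (x & 0xfffu);
    }

    static unsigned int get_pte_flags(uint64_t pte)
    {
        return (unsigned int)(((pte >> 40) & ~0xfffu) | (pte & 0xfffu));
    }

    #define ACCESS_MASK   0x1e000u  /* flag bits 16:13 -> PTE bits 56:53 */
    #define ACCESS_SHIFT  13

    int main(void)
    {
        uint64_t pte = 0;
        unsigned int access = 5;    /* e.g. p2m_access_rx */

        /* Insert: clear the old field, then OR in the new value. */
        pte = (pte & ~put_pte_flags(ACCESS_MASK)) |
              put_pte_flags((access << ACCESS_SHIFT) & ACCESS_MASK);

        /* Extract it again. */
        assert(((get_pte_flags(pte) & ACCESS_MASK) >> ACCESS_SHIFT) == access);

        return 0;
    }
)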

Modify p2m_type_to_flags() to accept and interpret an access value, in
parallel with the EPT code.  For example, p2m_access_rx clears _PAGE_RW
(write denied) and _PAGE_NX_BIT (execute allowed) from the computed flags.

Add a set_default_access() method to the p2m-pt and p2m-ept versions of
the p2m, rather than setting default_access directly, so that each
implementation can enforce its own set of permitted values: the p2m-pt
version rejects p2m_access_n, w, wx and n2rwx (an NPT entry cannot
withhold read permission while remaining present), while the EPT version
accepts every access type.
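
(A rough sketch of this hook pattern, with hypothetical names -- access_t
and struct p2m below are simplified stand-ins for Xen's p2m_access_t and
struct p2m_domain, and the checks mirror the shape of
p2m_pt_check_access() in the patch rather than being the real code:

    #include <errno.h>
    #include <stdio.h>

    typedef enum {
        access_n, access_r, access_w, access_rw, access_x,
        access_wx, access_rx, access_rwx, access_rx2rw, access_n2rwx,
    } access_t;

    struct p2m {
        access_t default_access;
        int (*set_default_access)(struct p2m *p2m, access_t a);
    };

    /* NPT: a present PTE is always readable, so reject no-read types. */
    static int npt_set_default_access(struct p2m *p2m, access_t a)
    {
        switch ( a )
        {
        case access_n: case access_w: case access_wx: case access_n2rwx:
            return -EINVAL;
        default:
            p2m->default_access = a;
            return 0;
        }
    }

    int main(void)
    {
        struct p2m npt = { access_rwx, npt_set_default_access };

        printf("%d\n", npt.set_default_access(&npt, access_w));  /* -EINVAL */
        printf("%d\n", npt.set_default_access(&npt, access_rx)); /* 0 */
        return 0;
    }
)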

[1] 854a49a7486a02edae5b3e53617bace526e9c1b1

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>

---
Note: Tested with xen-access write/exec
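
      Presumably with invocations along the lines of the following,
      using tools/tests/xen-access against an HVM guest on an AMD/NPT
      host (exact modes vary by tree):

          xen-access <domid> write
          xen-access <domid> exec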

Changes since V1:
        - Remove radix tree and store access bits in NPT p2m table.
---
 xen/arch/x86/mm/mem_access.c |   7 +--
 xen/arch/x86/mm/p2m-ept.c    |   9 +++
 xen/arch/x86/mm/p2m-pt.c     | 111 ++++++++++++++++++++++++++++++-----
 xen/include/asm-x86/p2m.h    |   2 +
 4 files changed, 109 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 0144f92b98..d8cc7a50de 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -374,10 +374,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
 
     /* If request to set default access. */
     if ( gfn_eq(gfn, INVALID_GFN) )
-    {
-        p2m->default_access = a;
-        return 0;
-    }
+        return p2m->set_default_access(p2m, a);
 
     p2m_lock(p2m);
     if ( ap2m )
@@ -520,7 +517,7 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required)
 
 bool p2m_mem_access_sanity_check(const struct domain *d)
 {
-    return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
+    return is_hvm_domain(d) && hap_enabled(d);
 }
 
 /*
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 6b8468c793..a6f2347bc1 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -624,6 +624,14 @@ bool_t ept_handle_misconfig(uint64_t gpa)
     return spurious ? (rc >= 0) : (rc > 0);
 }
 
+static int ept_set_default_access(struct p2m_domain *p2m, p2m_access_t p2ma)
+{
+    /* EPT can represent every p2m_access_t value; no check is needed. */
+    p2m->default_access = p2ma;
+
+    return 0;
+}
+
 /*
  * ept_set_entry() computes 'need_modify_vtd_table' for itself,
  * by observing whether any gfn->mfn translations are modified.
@@ -1222,6 +1230,7 @@ int ept_p2m_init(struct p2m_domain *p2m)
     p2m->change_entry_type_global = ept_change_entry_type_global;
     p2m->change_entry_type_range = ept_change_entry_type_range;
     p2m->memory_type_changed = ept_memory_type_changed;
+    p2m->set_default_access = ept_set_default_access;
     p2m->audit_p2m = NULL;
     p2m->tlb_flush = ept_tlb_flush;
 
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 3a0a500d66..7f79eac979 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -63,10 +63,13 @@
 #define needs_recalc(level, ent) _needs_recalc(level##e_get_flags(ent))
 #define valid_recalc(level, ent) (!(level##e_get_flags(ent) & _PAGE_ACCESSED))
 
+#define _PAGE_ACCESS_BITS  (0x1e000) /* mem_access rights: flag bits 16:13 = PTE 56:53 */
+
 static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
                                        p2m_type_t t,
                                        mfn_t mfn,
-                                       unsigned int level)
+                                       unsigned int level,
+                                       p2m_access_t access)
 {
     unsigned long flags = (unsigned long)(t & 0x7f) << 12;
 
@@ -79,23 +82,28 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
     case p2m_ram_paged:
     case p2m_ram_paging_in:
     default:
-        return flags | _PAGE_NX_BIT;
+        flags |= _PAGE_NX_BIT;
+        break;
     case p2m_grant_map_ro:
-        return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
+        flags |= P2M_BASE_FLAGS | _PAGE_NX_BIT;
+        break;
     case p2m_ioreq_server:
         flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
         if ( p2m->ioreq.flags & XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
-            return flags & ~_PAGE_RW;
-        return flags;
+            flags &= ~_PAGE_RW;
+        break;
     case p2m_ram_ro:
     case p2m_ram_logdirty:
     case p2m_ram_shared:
-        return flags | P2M_BASE_FLAGS;
+        flags |= P2M_BASE_FLAGS;
+        break;
     case p2m_ram_rw:
-        return flags | P2M_BASE_FLAGS | _PAGE_RW;
+        flags |= P2M_BASE_FLAGS | _PAGE_RW;
+        break;
     case p2m_grant_map_rw:
     case p2m_map_foreign:
-        return flags | P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
+        flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
+        break;
     case p2m_mmio_direct:
         if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
             flags |= _PAGE_RW;
@@ -104,8 +112,32 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
             flags |= _PAGE_PWT;
             ASSERT(!level);
         }
-        return flags | P2M_BASE_FLAGS | _PAGE_PCD;
+        flags |= P2M_BASE_FLAGS | _PAGE_PCD;
+        break;
     }
+
+    switch ( access )
+    {
+    case p2m_access_r:
+        flags |= _PAGE_NX_BIT;
+        flags &= ~_PAGE_RW;
+        break;
+    case p2m_access_rw:
+        flags |= _PAGE_NX_BIT;
+        break;
+    case p2m_access_rx:
+    case p2m_access_rx2rw:
+        flags &= ~(_PAGE_NX_BIT | _PAGE_RW);
+        break;
+    case p2m_access_x:
+        flags &= ~_PAGE_RW;
+        break;
+    case p2m_access_rwx:
+    default:
+        break;
+    }
+
+    return flags;
 }
 
 
@@ -152,6 +184,17 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
     p2m_free_ptp(p2m, l1e_get_page(*p2m_entry));
 }
 
+static void p2m_set_access(intpte_t *entry, p2m_access_t access)
+{
+    *entry = (*entry & ~put_pte_flags(_PAGE_ACCESS_BITS)) |
+             put_pte_flags(MASK_INSR(access, _PAGE_ACCESS_BITS));
+}
+
+static p2m_access_t p2m_get_access(intpte_t entry)
+{
+    return (p2m_access_t)(MASK_EXTR(get_pte_flags(entry), _PAGE_ACCESS_BITS));
+}
+
 // Walk one level of the P2M table, allocating a new table if required.
 // Returns 0 on error.
 //
@@ -226,6 +269,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         {
             new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
                                      flags);
+            p2m_set_access(&new_entry.l1, p2m->default_access);
             rc = p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
             if ( rc )
             {
@@ -237,6 +281,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         unmap_domain_page(l1_entry);
 
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
+        p2m_set_access(&new_entry.l1, p2m->default_access);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry,
                                   level + 1);
         if ( rc )
@@ -425,8 +470,9 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
         if ( nt != ot )
         {
             unsigned long mfn = l1e_get_pfn(e);
+            p2m_access_t a = p2m_get_access(e.l1);
             unsigned long flags = p2m_type_to_flags(p2m, nt,
-                                                    _mfn(mfn), level);
+                                                    _mfn(mfn), level, a);
 
             if ( level )
             {
@@ -474,6 +520,32 @@ int p2m_pt_handle_deferred_changes(uint64_t gpa)
     return rc;
 }
 
+static int p2m_pt_check_access(p2m_access_t p2ma)
+{
+    switch ( p2ma )
+    {
+    case p2m_access_n:
+    case p2m_access_w:
+    case p2m_access_wx:
+    case p2m_access_n2rwx:
+        return -EINVAL;
+    default:
+        break;
+    }
+    return 0;
+}
+
+static int p2m_pt_set_default_access(struct p2m_domain *p2m,
+                                     p2m_access_t p2ma)
+{
+    int rc = p2m_pt_check_access(p2ma);
+
+    if ( !rc )
+        p2m->default_access = p2ma;
+
+    return rc;
+}
+
 /* Checks only applicable to entries with order > PAGE_ORDER_4K */
 static void check_entry(mfn_t mfn, p2m_type_t new, p2m_type_t old,
                         unsigned int order)
@@ -514,6 +586,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
     if ( !sve )
         return -EOPNOTSUPP;
 
+    if ( (rc = p2m_pt_check_access(p2ma)) )
+        return rc;
+
     if ( tb_init_done )
     {
         struct {
@@ -564,10 +639,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order);
         l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
             ? p2m_l3e_from_pfn(mfn_x(mfn),
-                               p2m_type_to_flags(p2m, p2mt, mfn, 2))
+                               p2m_type_to_flags(p2m, p2mt, mfn, 2, p2ma))
             : l3e_empty();
         entry_content.l1 = l3e_content.l3;
 
+        p2m_set_access(&entry_content.l1, p2ma);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
         if ( rc )
@@ -597,10 +673,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
 
         if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
             entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
-                                         p2m_type_to_flags(p2m, p2mt, mfn, 0));
+                                         p2m_type_to_flags(p2m, p2mt, mfn, 0, p2ma));
         else
             entry_content = l1e_empty();
 
+        p2m_set_access(&entry_content.l1, p2ma);
         /* level 1 entry */
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -625,10 +702,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order);
         l2e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
             ? p2m_l2e_from_pfn(mfn_x(mfn),
-                               p2m_type_to_flags(p2m, p2mt, mfn, 1))
+                               p2m_type_to_flags(p2m, p2mt, mfn, 1, p2ma))
             : l2e_empty();
         entry_content.l1 = l2e_content.l2;
 
+        p2m_set_access(&entry_content.l1, p2ma);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
         if ( rc )
@@ -676,8 +754,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_,
      * XXX Once we start explicitly registering MMIO regions in the p2m 
      * XXX we will return p2m_invalid for unmapped gfns */
     *t = p2m_mmio_dm;
-    /* Not implemented except with EPT */
-    *a = p2m_access_rwx; 
+    *a = p2m_access_n;
 
     if ( gfn > p2m->max_mapped_pfn )
     {
@@ -740,6 +817,7 @@ pod_retry_l3:
                        l1_table_offset(addr));
             *t = p2m_recalc_type(recalc || _needs_recalc(flags),
                                  p2m_flags_to_type(flags), p2m, gfn);
+            *a = p2m_get_access(l3e->l3);
             unmap_domain_page(l3e);
 
             ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
@@ -779,6 +857,7 @@ pod_retry_l2:
         mfn = _mfn(l2e_get_pfn(*l2e) + l1_table_offset(addr));
         *t = p2m_recalc_type(recalc || _needs_recalc(flags),
                              p2m_flags_to_type(flags), p2m, gfn);
+        *a = p2m_get_access(l2e->l2);
         unmap_domain_page(l2e);
         
         ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
@@ -815,6 +894,7 @@ pod_retry_l1:
     }
     mfn = l1e_get_mfn(*l1e);
     *t = p2m_recalc_type(recalc || _needs_recalc(flags), l1t, p2m, gfn);
+    *a = p2m_get_access(l1e->l1);
     unmap_domain_page(l1e);
 
     ASSERT(mfn_valid(mfn) || !p2m_is_any_ram(*t) || p2m_is_paging(*t));
@@ -1063,6 +1143,7 @@ void p2m_pt_init(struct p2m_domain *p2m)
     p2m->change_entry_type_global = p2m_pt_change_entry_type_global;
     p2m->change_entry_type_range = p2m_pt_change_entry_type_range;
     p2m->write_p2m_entry = paging_write_p2m_entry;
+    p2m->set_default_access = p2m_pt_set_default_access;
 #if P2M_AUDIT
     p2m->audit_p2m = p2m_pt_audit_p2m;
 #else
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 94285db1b4..32e71067b7 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -269,6 +269,8 @@ struct p2m_domain {
                                           unsigned long gfn, l1_pgentry_t *p,
                                           l1_pgentry_t new, unsigned int level);
     long               (*audit_p2m)(struct p2m_domain *p2m);
+    int                (*set_default_access)(struct p2m_domain *p2m,
+                                             p2m_access_t p2ma);
 
     /*
      * P2M updates may require TLBs to be flushed (invalidated).
-- 
2.17.1

