[Xen-devel] [PATCH RFC v2 07/12] x86: allow per-domain mappings without NX bit or with specific mfn



For support of per-vcpu stacks we need per-vcpu trampolines. To be able
to put those into the per-domain mappings, the upper-level page table
entries covering the per-domain mappings must not have the NX bit set,
as NX at any paging level renders the whole range non-executable.
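
The difference between the two flag sets swapped below comes down to the
NX bit; the definitions in xen/include/asm-x86/page.h are roughly:

    /* NX set: data mappings only. */
    #define __PAGE_HYPERVISOR_RO (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NX)
    #define __PAGE_HYPERVISOR_RW (__PAGE_HYPERVISOR_RO | _PAGE_DIRTY | _PAGE_RW)

    /* NX clear: execution allowed, subject to the lower levels. */
    #define __PAGE_HYPERVISOR_RX (_PAGE_PRESENT | _PAGE_ACCESSED)
    #define __PAGE_HYPERVISOR    (__PAGE_HYPERVISOR_RX | _PAGE_DIRTY | _PAGE_RW)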

In order to be able to clear the NX bit for a per-domain mapping, add a
helper flipflags_perdomain_mapping() for flipping the page table flags
of a specific mapped page.
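
A sketch of the intended use, clearing NX on an already populated
per-domain page (the virtual address below is made up; flipping _PAGE_NX
degrades to a no-op on hardware without NX support, where the flag
evaluates to 0):

    /* Hypothetical va of a per-vcpu trampoline page inside the
     * per-domain area; a later patch would define the real slot. */
    unsigned long trampoline_va = PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS - 1);

    /* Toggle NX off on the L1 entry backing trampoline_va. */
    flipflags_perdomain_mapping(d, trampoline_va, _PAGE_NX);

Note the helper only acts on L1 entries which are present and carry
_PAGE_AVAIL0, i.e. on pages create_perdomain_mapping() allocated itself.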

To be able to use a page from the Xen heap for the last per-vcpu stack
page, add a helper to map an arbitrary mfn into the per-domain area.
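
Hypothetical usage, with STACK_LAST_VA standing in for the per-domain
address of that last stack page (not defined by this patch):

    /* Allocate the backing page from the Xen heap... */
    void *stack = alloc_xenheap_page();

    /* ...and install it; a previously installed helper-allocated
     * domheap page, if any, is freed by the helper. */
    if ( stack )
        addmfn_to_perdomain_mapping(d, STACK_LAST_VA, _mfn(virt_to_mfn(stack)));

Since the new L1 entry is written without _PAGE_AVAIL0, the teardown
paths won't mistake the xenheap page for one they own and free it.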

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/arch/x86/mm.c        | 81 ++++++++++++++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/mm.h |  3 ++
 2 files changed, 81 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 74cdb6e14d..ab990cc667 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1568,7 +1568,7 @@ void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
 
     /* Slot 260: Per-domain mappings (if applicable). */
     l4t[l4_table_offset(PERDOMAIN_VIRT_START)] =
-        d ? l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW)
+        d ? l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR)
           : l4e_empty();
 
     /* Slot 261-: text/data/bss, RW M2P, vmap, frametable, directmap. */
@@ -5269,7 +5269,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
         }
         l2tab = __map_domain_page(pg);
         clear_page(l2tab);
-        l3tab[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR_RW);
+        l3tab[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
     }
     else
         l2tab = map_l2t_from_l3e(l3tab[l3_table_offset(va)]);
@@ -5311,7 +5311,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
                 l1tab = __map_domain_page(pg);
             }
             clear_page(l1tab);
-            *pl2e = l2e_from_page(pg, __PAGE_HYPERVISOR_RW);
+            *pl2e = l2e_from_page(pg, __PAGE_HYPERVISOR);
         }
         else if ( !l1tab )
             l1tab = map_l1t_from_l2e(*pl2e);
@@ -5401,6 +5401,81 @@ void destroy_perdomain_mapping(struct domain *d, unsigned long va,
     unmap_domain_page(l3tab);
 }
 
+void flipflags_perdomain_mapping(struct domain *d, unsigned long va,
+                                 unsigned int flags)
+{
+    const l3_pgentry_t *l3tab, *pl3e;
+
+    ASSERT(va >= PERDOMAIN_VIRT_START &&
+           va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
+
+    if ( !d->arch.perdomain_l3_pg )
+        return;
+
+    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+    pl3e = l3tab + l3_table_offset(va);
+
+    if ( l3e_get_flags(*pl3e) & _PAGE_PRESENT )
+    {
+        const l2_pgentry_t *l2tab = map_l2t_from_l3e(*pl3e);
+        const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
+
+        if ( l2e_get_flags(*pl2e) & _PAGE_PRESENT )
+        {
+            l1_pgentry_t *l1tab = map_l1t_from_l2e(*pl2e);
+            unsigned int off = l1_table_offset(va);
+
+            if ( (l1e_get_flags(l1tab[off]) & (_PAGE_PRESENT | _PAGE_AVAIL0)) ==
+                 (_PAGE_PRESENT | _PAGE_AVAIL0) )
+                l1e_flip_flags(l1tab[off], flags);
+
+            unmap_domain_page(l1tab);
+        }
+
+        unmap_domain_page(l2tab);
+    }
+
+    unmap_domain_page(l3tab);
+}
+
+void addmfn_to_perdomain_mapping(struct domain *d, unsigned long va, mfn_t mfn)
+{
+    const l3_pgentry_t *l3tab, *pl3e;
+
+    ASSERT(va >= PERDOMAIN_VIRT_START &&
+           va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
+
+    if ( !d->arch.perdomain_l3_pg )
+        return;
+
+    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+    pl3e = l3tab + l3_table_offset(va);
+
+    if ( l3e_get_flags(*pl3e) & _PAGE_PRESENT )
+    {
+        const l2_pgentry_t *l2tab = map_l2t_from_l3e(*pl3e);
+        const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
+
+        if ( l2e_get_flags(*pl2e) & _PAGE_PRESENT )
+        {
+            l1_pgentry_t *l1tab = map_l1t_from_l2e(*pl2e);
+            unsigned int off = l1_table_offset(va);
+
+            if ( (l1e_get_flags(l1tab[off]) & (_PAGE_PRESENT | _PAGE_AVAIL0)) ==
+                 (_PAGE_PRESENT | _PAGE_AVAIL0) )
+                free_domheap_page(l1e_get_page(l1tab[off]));
+
+            l1tab[off] = l1e_from_mfn(mfn, __PAGE_HYPERVISOR_RW);
+
+            unmap_domain_page(l1tab);
+        }
+
+        unmap_domain_page(l2tab);
+    }
+
+    unmap_domain_page(l3tab);
+}
+
 void free_perdomain_mappings(struct domain *d)
 {
     l3_pgentry_t *l3tab;
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 3013c266fe..fa158bd96a 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -582,6 +582,9 @@ int create_perdomain_mapping(struct domain *, unsigned long va,
                              struct page_info **);
 void destroy_perdomain_mapping(struct domain *, unsigned long va,
                                unsigned int nr);
+void flipflags_perdomain_mapping(struct domain *d, unsigned long va,
+                                 unsigned int flags);
+void addmfn_to_perdomain_mapping(struct domain *d, unsigned long va, mfn_t mfn);
 void free_perdomain_mappings(struct domain *);
 
 extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);
-- 
2.13.6

