[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 15/17] x86: fill XPTI shadow pages and keep them in sync with guest L4



For being able to use the XPTI shadow L4 page tables in the hypervisor
fill them with the related entries of their masters and keep them in
sync when updates are done by the guest.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/arch/x86/mm.c              | 43 ++++++++++++++++++++++++++++++++++++++----
 xen/arch/x86/mm/shadow/multi.c |  2 ++
 xen/arch/x86/pv/dom0_build.c   |  3 +++
 xen/arch/x86/pv/xpti.c         | 35 ++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/pv/mm.h    |  4 ++++
 5 files changed, 83 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 16b004abe6..14dc776a52 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1609,6 +1609,18 @@ void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
                (ROOT_PAGETABLE_FIRST_XEN_SLOT + slots -
                 l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t));
     }
+
+    if ( is_domain_xpti_active(d) )
+    {
+        unsigned int slot;
+
+        for ( slot = ROOT_PAGETABLE_FIRST_XEN_SLOT;
+              slot <= ROOT_PAGETABLE_LAST_XEN_SLOT;
+              slot++ )
+            xpti_update_l4(d,
+                           mfn_x(mfn_eq(sl4mfn, INVALID_MFN) ? l4mfn : sl4mfn),
+                           slot, l4t[slot]);
+    }
 }
 
 bool fill_ro_mpt(const struct domain *d, mfn_t mfn)
@@ -1621,6 +1633,9 @@ bool fill_ro_mpt(const struct domain *d, mfn_t mfn)
         l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
             idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
         ret = true;
+        if ( is_domain_xpti_active(d) )
+            xpti_update_l4(d, mfn_x(mfn), l4_table_offset(RO_MPT_VIRT_START),
+                           idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]);
     }
     unmap_domain_page(l4tab);
 
@@ -1632,6 +1647,11 @@ void zap_ro_mpt(const struct domain *d, mfn_t mfn)
     l4_pgentry_t *l4tab = map_domain_page(mfn);
 
     l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
+
+    if ( is_domain_xpti_active(d) )
+        xpti_update_l4(d, mfn_x(mfn), l4_table_offset(RO_MPT_VIRT_START),
+                       l4e_empty());
+
     unmap_domain_page(l4tab);
 }
 
@@ -1682,6 +1702,8 @@ static int alloc_l4_table(struct page_info *page)
         }
 
         pl4e[i] = adjust_guest_l4e(pl4e[i], d);
+        if ( is_domain_xpti_active(d) )
+            xpti_update_l4(d, pfn, i, pl4e[i]);
     }
 
     if ( rc >= 0 )
@@ -2141,6 +2163,20 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
     return rc;
 }
 
+static bool update_l4pte(l4_pgentry_t *pl4e, l4_pgentry_t ol4e,
+                         l4_pgentry_t nl4e, unsigned long pfn,
+                         struct vcpu *v, bool preserve_ad)
+{
+    bool rc;
+
+    rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, v, preserve_ad);
+    if ( rc && is_vcpu_xpti_active(v) &&
+         (!paging_mode_shadow(v->domain) || !paging_get_hostmode(v)) )
+        xpti_update_l4(v->domain, pfn, pgentry_ptr_to_slot(pl4e), nl4e);
+
+    return rc;
+}
+
 /* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */
 static int mod_l4_entry(l4_pgentry_t *pl4e,
                         l4_pgentry_t nl4e,
@@ -2175,7 +2211,7 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
         if ( !l4e_has_changed(ol4e, nl4e, ~FASTPATH_FLAG_WHITELIST) )
         {
             nl4e = adjust_guest_l4e(nl4e, d);
-            rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad);
+            rc = update_l4pte(pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad);
             return rc ? 0 : -EFAULT;
         }
 
@@ -2185,14 +2221,13 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
         rc = 0;
 
         nl4e = adjust_guest_l4e(nl4e, d);
-        if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu,
-                                    preserve_ad)) )
+        if ( unlikely(!update_l4pte(pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad)) )
         {
             ol4e = nl4e;
             rc = -EFAULT;
         }
     }
-    else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu,
+    else if ( unlikely(!update_l4pte(pl4e, ol4e, nl4e, pfn, vcpu,
                                      preserve_ad)) )
     {
         return -EFAULT;
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 170163fbcf..110a5449a6 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -954,6 +954,8 @@ static int shadow_set_l4e(struct domain *d,
     /* Write the new entry */
     shadow_write_entries(sl4e, &new_sl4e, 1, sl4mfn);
     flags |= SHADOW_SET_CHANGED;
+    if ( is_domain_xpti_active(d) )
+        xpti_update_l4(d, mfn_x(sl4mfn), pgentry_ptr_to_slot(sl4e), new_sl4e);
 
     if ( shadow_l4e_get_flags(old_sl4e) & _PAGE_PRESENT )
     {
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 6e7bc435ab..8ef9c87845 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -142,6 +142,9 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
             pl3e = __map_domain_page(page);
             clear_page(pl3e);
             *pl4e = l4e_from_page(page, L4_PROT);
+            if ( is_domain_xpti_active(d) )
+                xpti_update_l4(d, pgtbl_pfn, l4_table_offset(vphysmap_start),
+                               *pl4e);
         } else
             pl3e = map_l3t_from_l4e(*pl4e);
 
diff --git a/xen/arch/x86/pv/xpti.c b/xen/arch/x86/pv/xpti.c
index f663fae806..da83339563 100644
--- a/xen/arch/x86/pv/xpti.c
+++ b/xen/arch/x86/pv/xpti.c
@@ -357,6 +357,18 @@ static unsigned int xpti_shadow_getforce(struct xpti_domain *xd)
     return idx;
 }
 
+static void xpti_init_xen_l4(struct xpti_domain *xd, struct xpti_l4pg *l4pg)
+{
+    unsigned int i;
+    l4_pgentry_t *src, *dest;
+
+    src = map_domain_page(_mfn(l4pg->guest_mfn));
+    dest = mfn_to_virt(l4pg->xen_mfn);
+    for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
+        dest[i] = src[i];
+    unmap_domain_page(src);
+}
+
 static unsigned int xpti_shadow_get(struct xpti_domain *xd, unsigned long mfn)
 {
     unsigned int idx;
@@ -385,6 +397,9 @@ static unsigned int xpti_shadow_get(struct xpti_domain *xd, unsigned long mfn)
     l4pg->ref_next = l4ref->idx;
     l4ref->idx = idx;
 
+    /* Fill the shadow page table entries. */
+    xpti_init_xen_l4(xd, l4pg);
+
     return idx;
 }
 
@@ -403,6 +418,26 @@ static unsigned int xpti_shadow_activate(struct xpti_domain *xd,
     return idx;
 }
 
+void xpti_update_l4(const struct domain *d, unsigned long mfn,
+                    unsigned int slot, l4_pgentry_t e)
+{
+    struct xpti_domain *xd = d->arch.pv_domain.xpti;
+    unsigned long flags;
+    unsigned int idx;
+    l4_pgentry_t *l4;
+
+    spin_lock_irqsave(&xd->lock, flags);
+
+    idx = xpti_shadow_from_hashlist(xd, mfn);
+    if ( idx != L4_INVALID )
+    {
+        l4 = mfn_to_virt(xd->l4pg[idx].xen_mfn);
+        l4[slot] = e;
+    }
+
+    spin_unlock_irqrestore(&xd->lock, flags);
+}
+
 void xpti_make_cr3(struct vcpu *v, unsigned long mfn)
 {
     struct xpti_domain *xd = v->domain->arch.pv_domain.xpti;
diff --git a/xen/include/asm-x86/pv/mm.h b/xen/include/asm-x86/pv/mm.h
index 25c035988c..8a90af1084 100644
--- a/xen/include/asm-x86/pv/mm.h
+++ b/xen/include/asm-x86/pv/mm.h
@@ -36,6 +36,8 @@ int xpti_domain_init(struct domain *d);
 void xpti_domain_destroy(struct domain *d);
 void xpti_make_cr3(struct vcpu *v, unsigned long mfn);
 void xpti_free_l4(struct domain *d, unsigned long mfn);
+void xpti_update_l4(const struct domain *d, unsigned long mfn,
+                    unsigned int slot, l4_pgentry_t e);
 
 static inline bool is_domain_xpti_active(const struct domain *d)
 {
@@ -73,6 +75,8 @@ static inline int xpti_domain_init(struct domain *d) { return 0; }
 static inline void xpti_domain_destroy(struct domain *d) { }
 static inline void xpti_make_cr3(struct vcpu *v, unsigned long mfn) { }
 static inline void xpti_free_l4(struct domain *d, unsigned long mfn) { }
+static inline void xpti_update_l4(const struct domain *d, unsigned long mfn,
+                                  unsigned int slot, l4_pgentry_t e) { }
 
 static inline bool is_domain_xpti_active(const struct domain *d)
 { return false; }
-- 
2.13.6


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.