[xen master] x86/p2m: write_p2m_entry_{pre,post} hooks are HVM-only



commit 9b9ef2388c238a5b0d070a76a50efe696052bc52
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon May 3 15:29:49 2021 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon May 3 15:29:49 2021 +0200

    x86/p2m: write_p2m_entry_{pre,post} hooks are HVM-only
    
    Move respective shadow code to its HVM-only source file, thus making it
    possible to exclude the hooks as well. This then shows that
    shadow_p2m_init() also isn't needed in !HVM builds.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c | 136 ----------------------------------------
 xen/arch/x86/mm/shadow/hvm.c    | 136 ++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/p2m.h       |   4 +-
 3 files changed, 137 insertions(+), 139 deletions(-)
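
For context, the hooks touched here are the pre/post callbacks that the p2m code invokes around a PT-based P2M entry write. The fragment below is only an illustrative sketch of that calling pattern and is not code from the tree: the wrapper name update_p2m_pte() and its body are assumptions, while the hook fields and their signatures come from the xen/include/asm-x86/p2m.h hunk further down.

    /*
     * Illustrative sketch (not actual Xen code): a p2m-pt style write path
     * driving the pre/post hooks that this patch makes HVM-only.  Relies on
     * Xen's existing types and accessors (struct p2m_domain, l1_pgentry_t,
     * l1e_get_flags()); the wrapper name and the bare PTE store are
     * assumptions made for the sake of the example.
     */
    static void update_p2m_pte(struct p2m_domain *p2m, unsigned long gfn,
                               l1_pgentry_t *p, l1_pgentry_t new,
                               unsigned int level)
    {
        unsigned int oflags = l1e_get_flags(*p);

        /* Give shadow code a chance to unshadow what the old entry mapped. */
        if ( p2m->write_p2m_entry_pre )
            p2m->write_p2m_entry_pre(p2m->domain, gfn, *p, new, level);

        *p = new;   /* the actual PTE update */

        /* Let shadow code react to the old flags (e.g. fast MMIO entries). */
        if ( p2m->write_p2m_entry_post )
            p2m->write_p2m_entry_post(p2m, oflags);
    }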

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 72d3816fcc..b060ebcb72 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2289,22 +2289,6 @@ void shadow_prepare_page_type_change(struct domain *d, struct page_info *page,
     shadow_remove_all_shadows(d, page_to_mfn(page));
 }
 
-static void
-sh_remove_all_shadows_and_parents(struct domain *d, mfn_t gmfn)
-/* Even harsher: this is an HVM page that we think is no longer a pagetable.
- * Unshadow it, and recursively unshadow pages that reference it. */
-{
-    sh_remove_shadows(d, gmfn, 0, 1);
-    /* XXX TODO:
-     * Rework this hashtable walker to return a linked-list of all
-     * the shadows it modified, then do breadth-first recursion
-     * to find the way up to higher-level tables and unshadow them too.
-     *
-     * The current code (just tearing down each page's shadows as we
-     * detect that it is not a pagetable) is correct, but very slow.
-     * It means extra emulated writes and slows down removal of mappings. */
-}
-
 /**************************************************************************/
 
 /* Reset the up-pointers of every L3 shadow to 0.
@@ -3031,126 +3015,6 @@ static int shadow_test_disable(struct domain *d)
     return ret;
 }
 
-/**************************************************************************/
-/* P2M map manipulations */
-
-/* Shadow-specific code which should be called when a P2M table entry is
- * updated with new content. It is responsible for updating the entry, as
- * well as other shadow processing jobs.
- */
-
-static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
-                                       l1_pgentry_t old, l1_pgentry_t new,
-                                       unsigned int level)
-{
-    mfn_t omfn = l1e_get_mfn(old);
-    unsigned int oflags = l1e_get_flags(old);
-    p2m_type_t p2mt = p2m_flags_to_type(oflags);
-    bool flush = false;
-
-    /*
-     * If there are any shadows, update them.  But if shadow_teardown()
-     * has already been called then it's not safe to try.
-     */
-    if ( unlikely(!d->arch.paging.shadow.total_pages) )
-        return;
-
-    switch ( level )
-    {
-    default:
-        /*
-         * The following assertion is to make sure we don't step on 1GB host
-         * page support of HVM guest.
-         */
-        ASSERT(!((oflags & _PAGE_PRESENT) && (oflags & _PAGE_PSE)));
-        break;
-
-    /* If we're removing an MFN from the p2m, remove it from the shadows too */
-    case 1:
-        if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(omfn) )
-        {
-            sh_remove_all_shadows_and_parents(d, omfn);
-            if ( sh_remove_all_mappings(d, omfn, _gfn(gfn)) )
-                flush = true;
-        }
-        break;
-
-    /*
-     * If we're removing a superpage mapping from the p2m, we need to check
-     * all the pages covered by it.  If they're still there in the new
-     * scheme, that's OK, but otherwise they must be unshadowed.
-     */
-    case 2:
-        if ( !(oflags & _PAGE_PRESENT) || !(oflags & _PAGE_PSE) )
-            break;
-
-        if ( p2m_is_valid(p2mt) && mfn_valid(omfn) )
-        {
-            unsigned int i;
-            mfn_t nmfn = l1e_get_mfn(new);
-            l1_pgentry_t *npte = NULL;
-
-            /* If we're replacing a superpage with a normal L1 page, map it */
-            if ( (l1e_get_flags(new) & _PAGE_PRESENT) &&
-                 !(l1e_get_flags(new) & _PAGE_PSE) &&
-                 mfn_valid(nmfn) )
-                npte = map_domain_page(nmfn);
-
-            gfn &= ~(L1_PAGETABLE_ENTRIES - 1);
-
-            for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
-            {
-                if ( !npte ||
-                     !p2m_is_ram(p2m_flags_to_type(l1e_get_flags(npte[i]))) ||
-                     !mfn_eq(l1e_get_mfn(npte[i]), omfn) )
-                {
-                    /* This GFN->MFN mapping has gone away */
-                    sh_remove_all_shadows_and_parents(d, omfn);
-                    if ( sh_remove_all_mappings(d, omfn, _gfn(gfn + i)) )
-                        flush = true;
-                }
-                omfn = mfn_add(omfn, 1);
-            }
-
-            if ( npte )
-                unmap_domain_page(npte);
-        }
-
-        break;
-    }
-
-    if ( flush )
-        guest_flush_tlb_mask(d, d->dirty_cpumask);
-}
-
-#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
-static void
-sh_write_p2m_entry_post(struct p2m_domain *p2m, unsigned int oflags)
-{
-    struct domain *d = p2m->domain;
-
-    /* If we're doing FAST_FAULT_PATH, then shadow mode may have
-       cached the fact that this is an mmio region in the shadow
-       page tables.  Blow the tables away to remove the cache.
-       This is pretty heavy handed, but this is a rare operation
-       (it might happen a dozen times during boot and then never
-       again), so it doesn't matter too much. */
-    if ( d->arch.paging.shadow.has_fast_mmio_entries )
-    {
-        shadow_blow_tables(d);
-        d->arch.paging.shadow.has_fast_mmio_entries = false;
-    }
-}
-#else
-# define sh_write_p2m_entry_post NULL
-#endif
-
-void shadow_p2m_init(struct p2m_domain *p2m)
-{
-    p2m->write_p2m_entry_pre  = sh_unshadow_for_p2m_change;
-    p2m->write_p2m_entry_post = sh_write_p2m_entry_post;
-}
-
 /**************************************************************************/
 /* Log-dirty mode support */
 
diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c
index 7f98286385..d5f42102a0 100644
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -773,6 +773,142 @@ void sh_destroy_monitor_table(const struct vcpu *v, mfn_t mmfn,
     shadow_free(d, mmfn);
 }
 
+/**************************************************************************/
+/* P2M map manipulations */
+
+/* Shadow-specific code which should be called when a P2M table entry is
+ * updated with new content. It is responsible for updating the entry, as
+ * well as other shadow processing jobs.
+ */
+
+static void
+sh_remove_all_shadows_and_parents(struct domain *d, mfn_t gmfn)
+/* Even harsher: this is an HVM page that we think is no longer a pagetable.
+ * Unshadow it, and recursively unshadow pages that reference it. */
+{
+    sh_remove_shadows(d, gmfn, 0, 1);
+    /* XXX TODO:
+     * Rework this hashtable walker to return a linked-list of all
+     * the shadows it modified, then do breadth-first recursion
+     * to find the way up to higher-level tables and unshadow them too.
+     *
+     * The current code (just tearing down each page's shadows as we
+     * detect that it is not a pagetable) is correct, but very slow.
+     * It means extra emulated writes and slows down removal of mappings. */
+}
+
+static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
+                                       l1_pgentry_t old, l1_pgentry_t new,
+                                       unsigned int level)
+{
+    mfn_t omfn = l1e_get_mfn(old);
+    unsigned int oflags = l1e_get_flags(old);
+    p2m_type_t p2mt = p2m_flags_to_type(oflags);
+    bool flush = false;
+
+    /*
+     * If there are any shadows, update them.  But if shadow_teardown()
+     * has already been called then it's not safe to try.
+     */
+    if ( unlikely(!d->arch.paging.shadow.total_pages) )
+        return;
+
+    switch ( level )
+    {
+    default:
+        /*
+         * The following assertion is to make sure we don't step on 1GB host
+         * page support of HVM guest.
+         */
+        ASSERT(!((oflags & _PAGE_PRESENT) && (oflags & _PAGE_PSE)));
+        break;
+
+    /* If we're removing an MFN from the p2m, remove it from the shadows too */
+    case 1:
+        if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(omfn) )
+        {
+            sh_remove_all_shadows_and_parents(d, omfn);
+            if ( sh_remove_all_mappings(d, omfn, _gfn(gfn)) )
+                flush = true;
+        }
+        break;
+
+    /*
+     * If we're removing a superpage mapping from the p2m, we need to check
+     * all the pages covered by it.  If they're still there in the new
+     * scheme, that's OK, but otherwise they must be unshadowed.
+     */
+    case 2:
+        if ( !(oflags & _PAGE_PRESENT) || !(oflags & _PAGE_PSE) )
+            break;
+
+        if ( p2m_is_valid(p2mt) && mfn_valid(omfn) )
+        {
+            unsigned int i;
+            mfn_t nmfn = l1e_get_mfn(new);
+            l1_pgentry_t *npte = NULL;
+
+            /* If we're replacing a superpage with a normal L1 page, map it */
+            if ( (l1e_get_flags(new) & _PAGE_PRESENT) &&
+                 !(l1e_get_flags(new) & _PAGE_PSE) &&
+                 mfn_valid(nmfn) )
+                npte = map_domain_page(nmfn);
+
+            gfn &= ~(L1_PAGETABLE_ENTRIES - 1);
+
+            for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+            {
+                if ( !npte ||
+                     !p2m_is_ram(p2m_flags_to_type(l1e_get_flags(npte[i]))) ||
+                     !mfn_eq(l1e_get_mfn(npte[i]), omfn) )
+                {
+                    /* This GFN->MFN mapping has gone away */
+                    sh_remove_all_shadows_and_parents(d, omfn);
+                    if ( sh_remove_all_mappings(d, omfn, _gfn(gfn + i)) )
+                        flush = true;
+                }
+                omfn = mfn_add(omfn, 1);
+            }
+
+            if ( npte )
+                unmap_domain_page(npte);
+        }
+
+        break;
+    }
+
+    if ( flush )
+        guest_flush_tlb_mask(d, d->dirty_cpumask);
+}
+
+#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
+static void
+sh_write_p2m_entry_post(struct p2m_domain *p2m, unsigned int oflags)
+{
+    struct domain *d = p2m->domain;
+
+    /* If we're doing FAST_FAULT_PATH, then shadow mode may have
+       cached the fact that this is an mmio region in the shadow
+       page tables.  Blow the tables away to remove the cache.
+       This is pretty heavy handed, but this is a rare operation
+       (it might happen a dozen times during boot and then never
+       again), so it doesn't matter too much. */
+    if ( d->arch.paging.shadow.has_fast_mmio_entries )
+    {
+        shadow_blow_tables(d);
+        d->arch.paging.shadow.has_fast_mmio_entries = false;
+    }
+}
+#else
+# define sh_write_p2m_entry_post NULL
+#endif
+
+void shadow_p2m_init(struct p2m_domain *p2m)
+{
+    p2m->write_p2m_entry_pre  = sh_unshadow_for_p2m_change;
+    p2m->write_p2m_entry_post = sh_write_p2m_entry_post;
+}
+
 /**************************************************************************/
 /* VRAM dirty tracking support */
 int shadow_track_dirty_vram(struct domain *d,
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 776d3bb1b5..f33ae2327d 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -278,7 +278,6 @@ struct p2m_domain {
                                                   unsigned long first_gfn,
                                                   unsigned long last_gfn);
     void               (*memory_type_changed)(struct p2m_domain *p2m);
-#endif
     void               (*write_p2m_entry_pre)(struct domain *d,
                                               unsigned long gfn,
                                               l1_pgentry_t old,
@@ -286,6 +285,7 @@ struct p2m_domain {
                                               unsigned int level);
     void               (*write_p2m_entry_post)(struct p2m_domain *p2m,
                                                unsigned int oflags);
+#endif
 #if P2M_AUDIT
     long               (*audit_p2m)(struct p2m_domain *p2m);
 #endif
@@ -796,8 +796,6 @@ int __must_check p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
 #if defined(CONFIG_HVM)
 /* Set up function pointers for PT implementation: only for use by p2m code */
 extern void p2m_pt_init(struct p2m_domain *p2m);
-#elif defined(CONFIG_SHADOW_PAGING)
-# define p2m_pt_init shadow_p2m_init
 #else
 static inline void p2m_pt_init(struct p2m_domain *p2m) {}
 #endif
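
For reference, with the final hunk applied the p2m_pt_init() selection in p2m.h reduces to the following (reproduced from the hunk above with the removed lines dropped), so a !HVM build now always gets the empty inline stub; as the commit message notes, shadow_p2m_init() isn't needed there:

    #if defined(CONFIG_HVM)
    /* Set up function pointers for PT implementation: only for use by p2m code */
    extern void p2m_pt_init(struct p2m_domain *p2m);
    #else
    static inline void p2m_pt_init(struct p2m_domain *p2m) {}
    #endif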
--
generated by git-patchbot for /home/xen/git/xen.git#master