
[xen staging] x86/P2M: split out init/teardown functions



commit e2b2ff6779580a3a56f338a81e8d74ec30eead8c
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Fri Apr 8 14:44:05 2022 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Apr 8 14:44:05 2022 +0200

    x86/P2M: split out init/teardown functions
    
    Mostly just code movement, and certainly no functional change intended.
    In p2m_final_teardown() the calls to p2m_teardown_{alt,nested}p2m() need
    to be guarded by an is_hvm_domain() check now, though. This matches
    p2m_init(). And p2m_is_logdirty_range() also gets moved inside the (so
    far) adjacent #ifdef.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
 xen/arch/x86/include/asm/hvm/vmx/vmx.h |   4 -
 xen/arch/x86/mm/Makefile               |   2 +
 xen/arch/x86/mm/altp2m.c               |  40 +++++
 xen/arch/x86/mm/mm-locks.h             |   2 -
 xen/arch/x86/mm/nested.c               |  74 +++++++++
 xen/arch/x86/mm/p2m-basic.c            | 207 +++++++++++++++++++++++++
 xen/arch/x86/mm/p2m-ept.c              |   1 +
 xen/arch/x86/mm/p2m.c                  | 265 +--------------------------------
 xen/arch/x86/mm/p2m.h                  |  22 +++
 9 files changed, 348 insertions(+), 269 deletions(-)
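For orientation, the pairing described in the commit message can be sketched as follows. This is an illustrative, simplified caller only -- the wrapper function below is hypothetical and not part of the patch -- while p2m_init(), p2m_teardown() and p2m_final_teardown() are the entry points the patch moves into p2m-basic.c:

    /*
     * Illustration only (not from the patch): how the split entry points
     * pair up over a domain's lifetime.  The caller shown here is
     * hypothetical; in Xen these are reached from the arch domain
     * creation/destruction and paging teardown paths.
     */
    static int p2m_lifecycle_sketch(struct domain *d)
    {
        int rc;

        /* Host p2m always; nested/alt p2m only when is_hvm_domain(d). */
        rc = p2m_init(d);
        if ( rc )
            return rc;    /* p2m_init() already unwound its partial work */

        /*
         * ... domain runs; p2m_teardown(p2m_get_hostp2m(d)) later returns
         * the p2m pages to Xen ...
         */

        /*
         * Frees alt/nested p2m first (HVM only, mirroring p2m_init()),
         * then the host p2m.
         */
        p2m_final_teardown(d);

        return 0;
    }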

diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
index c2ebdd6864..8eedf59155 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
@@ -594,15 +594,11 @@ unsigned int vmx_get_cpl(void);
 void vmx_inject_extint(int trap, uint8_t source);
 void vmx_inject_nmi(void);
 
-int ept_p2m_init(struct p2m_domain *p2m);
-void ept_p2m_uninit(struct p2m_domain *p2m);
-
 void ept_walk_table(struct domain *d, unsigned long gfn);
 bool_t ept_handle_misconfig(uint64_t gpa);
 int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
                        unsigned int order, bool *ipat, p2m_type_t type);
 void setup_ept_dump(void);
-void p2m_init_altp2m_ept(struct domain *d, unsigned int i);
 /* Locate an alternate p2m by its EPTP */
 unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp);
 
diff --git a/xen/arch/x86/mm/Makefile b/xen/arch/x86/mm/Makefile
index a47ef5fd9f..c6eca5ac82 100644
--- a/xen/arch/x86/mm/Makefile
+++ b/xen/arch/x86/mm/Makefile
@@ -7,7 +7,9 @@ obj-$(CONFIG_SHADOW_PAGING) += guest_walk_4.o
 obj-$(CONFIG_MEM_ACCESS) += mem_access.o
 obj-$(CONFIG_MEM_PAGING) += mem_paging.o
 obj-$(CONFIG_MEM_SHARING) += mem_sharing.o
+obj-$(CONFIG_HVM) += nested.o
 obj-y += p2m.o
+obj-y += p2m-basic.o
 obj-$(CONFIG_HVM) += p2m-ept.o p2m-pod.o p2m-pt.o
 obj-y += paging.o
 obj-y += physmap.o
diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
index c091b03ea3..8be6eaf242 100644
--- a/xen/arch/x86/mm/altp2m.c
+++ b/xen/arch/x86/mm/altp2m.c
@@ -19,6 +19,8 @@
 #include <asm/hvm/hvm.h>
 #include <asm/p2m.h>
 #include <asm/altp2m.h>
+#include "mm-locks.h"
+#include "p2m.h"
 
 void
 altp2m_vcpu_initialise(struct vcpu *v)
@@ -123,6 +125,44 @@ void altp2m_vcpu_disable_ve(struct vcpu *v)
     }
 }
 
+int p2m_init_altp2m(struct domain *d)
+{
+    unsigned int i;
+    struct p2m_domain *p2m;
+    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
+
+    mm_lock_init(&d->arch.altp2m_list_lock);
+    for ( i = 0; i < MAX_ALTP2M; i++ )
+    {
+        d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
+        if ( p2m == NULL )
+        {
+            p2m_teardown_altp2m(d);
+            return -ENOMEM;
+        }
+        p2m->p2m_class = p2m_alternate;
+        p2m->access_required = hostp2m->access_required;
+        _atomic_set(&p2m->active_vcpus, 0);
+    }
+
+    return 0;
+}
+
+void p2m_teardown_altp2m(struct domain *d)
+{
+    unsigned int i;
+    struct p2m_domain *p2m;
+
+    for ( i = 0; i < MAX_ALTP2M; i++ )
+    {
+        if ( !d->arch.altp2m_p2m[i] )
+            continue;
+        p2m = d->arch.altp2m_p2m[i];
+        d->arch.altp2m_p2m[i] = NULL;
+        p2m_free_one(p2m);
+    }
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index fcfd4706ba..831e56d281 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -25,8 +25,6 @@
 #ifndef _MM_LOCKS_H
 #define _MM_LOCKS_H
 
-#include <asm/mem_sharing.h>
-
 /* Per-CPU variable for enforcing the lock ordering */
 DECLARE_PER_CPU(int, mm_lock_level);
 
diff --git a/xen/arch/x86/mm/nested.c b/xen/arch/x86/mm/nested.c
new file mode 100644
index 0000000000..fab62b2b2d
--- /dev/null
+++ b/xen/arch/x86/mm/nested.c
@@ -0,0 +1,74 @@
+/******************************************************************************
+ * arch/x86/mm/nested.c
+ *
+ * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
+ * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
+ * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
+ * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
+ * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/sched.h>
+#include <asm/p2m.h>
+#include "mm-locks.h"
+#include "p2m.h"
+
+void p2m_nestedp2m_init(struct p2m_domain *p2m)
+{
+    INIT_LIST_HEAD(&p2m->np2m_list);
+
+    p2m->np2m_base = P2M_BASE_EADDR;
+    p2m->np2m_generation = 0;
+}
+
+int p2m_init_nestedp2m(struct domain *d)
+{
+    unsigned int i;
+    struct p2m_domain *p2m;
+
+    mm_lock_init(&d->arch.nested_p2m_lock);
+    for ( i = 0; i < MAX_NESTEDP2M; i++ )
+    {
+        d->arch.nested_p2m[i] = p2m = p2m_init_one(d);
+        if ( p2m == NULL )
+        {
+            p2m_teardown_nestedp2m(d);
+            return -ENOMEM;
+        }
+        p2m->p2m_class = p2m_nested;
+        p2m->write_p2m_entry_pre = NULL;
+        p2m->write_p2m_entry_post = nestedp2m_write_p2m_entry_post;
+        list_add(&p2m->np2m_list, &p2m_get_hostp2m(d)->np2m_list);
+    }
+
+    return 0;
+}
+
+void p2m_teardown_nestedp2m(struct domain *d)
+{
+    unsigned int i;
+    struct p2m_domain *p2m;
+
+    for ( i = 0; i < MAX_NESTEDP2M; i++ )
+    {
+        if ( !d->arch.nested_p2m[i] )
+            continue;
+        p2m = d->arch.nested_p2m[i];
+        list_del(&p2m->np2m_list);
+        p2m_free_one(p2m);
+        d->arch.nested_p2m[i] = NULL;
+    }
+}
diff --git a/xen/arch/x86/mm/p2m-basic.c b/xen/arch/x86/mm/p2m-basic.c
new file mode 100644
index 0000000000..be4a08b9cb
--- /dev/null
+++ b/xen/arch/x86/mm/p2m-basic.c
@@ -0,0 +1,207 @@
+/******************************************************************************
+ * arch/x86/mm/p2m-basic.c
+ *
+ * Basic P2M management largely applicable to all domain types.
+ *
+ * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
+ * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
+ * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
+ * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
+ * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/types.h>
+#include <asm/p2m.h>
+#include "mm-locks.h"
+#include "p2m.h"
+
+/* Init the datastructures for later use by the p2m code */
+static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
+{
+    int ret = 0;
+
+    mm_rwlock_init(&p2m->lock);
+#ifdef CONFIG_HVM
+    INIT_PAGE_LIST_HEAD(&p2m->pages);
+#endif
+
+    p2m->domain = d;
+    p2m->default_access = p2m_access_rwx;
+    p2m->p2m_class = p2m_host;
+
+    if ( !is_hvm_domain(d) )
+        return 0;
+
+    p2m_pod_init(p2m);
+    p2m_nestedp2m_init(p2m);
+
+    if ( hap_enabled(d) && cpu_has_vmx )
+        ret = ept_p2m_init(p2m);
+    else
+        p2m_pt_init(p2m);
+
+    spin_lock_init(&p2m->ioreq.lock);
+
+    return ret;
+}
+
+struct p2m_domain *p2m_init_one(struct domain *d)
+{
+    struct p2m_domain *p2m = xzalloc(struct p2m_domain);
+
+    if ( !p2m )
+        return NULL;
+
+    if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
+        goto free_p2m;
+
+    if ( p2m_initialise(d, p2m) )
+        goto free_cpumask;
+    return p2m;
+
+ free_cpumask:
+    free_cpumask_var(p2m->dirty_cpumask);
+ free_p2m:
+    xfree(p2m);
+    return NULL;
+}
+
+void p2m_free_one(struct p2m_domain *p2m)
+{
+    p2m_free_logdirty(p2m);
+    if ( hap_enabled(p2m->domain) && cpu_has_vmx )
+        ept_p2m_uninit(p2m);
+    free_cpumask_var(p2m->dirty_cpumask);
+    xfree(p2m);
+}
+
+static int p2m_init_hostp2m(struct domain *d)
+{
+    struct p2m_domain *p2m = p2m_init_one(d);
+    int rc;
+
+    if ( !p2m )
+        return -ENOMEM;
+
+    rc = p2m_init_logdirty(p2m);
+
+    if ( !rc )
+        d->arch.p2m = p2m;
+    else
+        p2m_free_one(p2m);
+
+    return rc;
+}
+
+static void p2m_teardown_hostp2m(struct domain *d)
+{
+    /* Iterate over all p2m tables per domain */
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    if ( p2m )
+    {
+        p2m_free_one(p2m);
+        d->arch.p2m = NULL;
+    }
+}
+
+int p2m_init(struct domain *d)
+{
+    int rc;
+
+    rc = p2m_init_hostp2m(d);
+    if ( rc || !is_hvm_domain(d) )
+        return rc;
+
+    /*
+     * Must initialise nestedp2m unconditionally
+     * since nestedhvm_enabled(d) returns false here.
+     * (p2m_init runs too early for HVM_PARAM_* options)
+     */
+    rc = p2m_init_nestedp2m(d);
+    if ( rc )
+    {
+        p2m_teardown_hostp2m(d);
+        return rc;
+    }
+
+    rc = p2m_init_altp2m(d);
+    if ( rc )
+    {
+        p2m_teardown_hostp2m(d);
+        p2m_teardown_nestedp2m(d);
+    }
+
+    return rc;
+}
+
+/*
+ * Return all the p2m pages to Xen.
+ * We know we don't have any extra mappings to these pages.
+ *
+ * hvm fixme: when adding support for pvh non-hardware domains, this path must
+ * cleanup any foreign p2m types (release refcnts on them).
+ */
+void p2m_teardown(struct p2m_domain *p2m)
+{
+#ifdef CONFIG_HVM
+    struct page_info *pg;
+#endif
+    struct domain *d;
+
+    if ( !p2m )
+        return;
+
+    d = p2m->domain;
+
+    p2m_lock(p2m);
+
+    ASSERT(atomic_read(&d->shr_pages) == 0);
+
+#ifdef CONFIG_HVM
+    p2m->phys_table = pagetable_null();
+
+    while ( (pg = page_list_remove_head(&p2m->pages)) )
+        d->arch.paging.free_page(d, pg);
+#endif
+
+    p2m_unlock(p2m);
+}
+
+void p2m_final_teardown(struct domain *d)
+{
+    if ( is_hvm_domain(d) )
+    {
+        /*
+         * We must tear down both of them unconditionally because
+         * we initialise them unconditionally.
+         */
+        p2m_teardown_altp2m(d);
+        p2m_teardown_nestedp2m(d);
+    }
+
+    /* Iterate over all p2m tables per domain */
+    p2m_teardown_hostp2m(d);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 70a401c3a7..b04ca6dbe8 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -35,6 +35,7 @@
 #include <xen/softirq.h>
 
 #include "mm-locks.h"
+#include "p2m.h"
 
 #define atomic_read_ept_entry(__pepte)                              \
     ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 1f1eddac48..6498cd460b 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -35,7 +35,6 @@
 #include <asm/page.h>
 #include <asm/paging.h>
 #include <asm/p2m.h>
-#include <asm/hvm/vmx/vmx.h> /* ept_p2m_init() */
 #include <asm/mem_sharing.h>
 #include <asm/hvm/nestedhvm.h>
 #include <asm/altp2m.h>
@@ -56,17 +55,9 @@ boolean_param("hap_2mb", opt_hap_2mb);
 
 DEFINE_PERCPU_RWLOCK_GLOBAL(p2m_percpu_rwlock);
 
-static void p2m_nestedp2m_init(struct p2m_domain *p2m)
-{
 #ifdef CONFIG_HVM
-    INIT_LIST_HEAD(&p2m->np2m_list);
-
-    p2m->np2m_base = P2M_BASE_EADDR;
-    p2m->np2m_generation = 0;
-#endif
-}
 
-static int p2m_init_logdirty(struct p2m_domain *p2m)
+int p2m_init_logdirty(struct p2m_domain *p2m)
 {
     if ( p2m->logdirty_ranges )
         return 0;
@@ -79,7 +70,7 @@ static int p2m_init_logdirty(struct p2m_domain *p2m)
     return 0;
 }
 
-static void p2m_free_logdirty(struct p2m_domain *p2m)
+void p2m_free_logdirty(struct p2m_domain *p2m)
 {
     if ( !p2m->logdirty_ranges )
         return;
@@ -88,205 +79,6 @@ static void p2m_free_logdirty(struct p2m_domain *p2m)
     p2m->logdirty_ranges = NULL;
 }
 
-/* Init the datastructures for later use by the p2m code */
-static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
-{
-    int ret = 0;
-
-    mm_rwlock_init(&p2m->lock);
-#ifdef CONFIG_HVM
-    INIT_PAGE_LIST_HEAD(&p2m->pages);
-#endif
-
-    p2m->domain = d;
-    p2m->default_access = p2m_access_rwx;
-    p2m->p2m_class = p2m_host;
-
-    if ( !is_hvm_domain(d) )
-        return 0;
-
-    p2m_pod_init(p2m);
-    p2m_nestedp2m_init(p2m);
-
-    if ( hap_enabled(d) && cpu_has_vmx )
-        ret = ept_p2m_init(p2m);
-    else
-        p2m_pt_init(p2m);
-
-    spin_lock_init(&p2m->ioreq.lock);
-
-    return ret;
-}
-
-static struct p2m_domain *p2m_init_one(struct domain *d)
-{
-    struct p2m_domain *p2m = xzalloc(struct p2m_domain);
-
-    if ( !p2m )
-        return NULL;
-
-    if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
-        goto free_p2m;
-
-    if ( p2m_initialise(d, p2m) )
-        goto free_cpumask;
-    return p2m;
-
-free_cpumask:
-    free_cpumask_var(p2m->dirty_cpumask);
-free_p2m:
-    xfree(p2m);
-    return NULL;
-}
-
-static void p2m_free_one(struct p2m_domain *p2m)
-{
-    p2m_free_logdirty(p2m);
-    if ( hap_enabled(p2m->domain) && cpu_has_vmx )
-        ept_p2m_uninit(p2m);
-    free_cpumask_var(p2m->dirty_cpumask);
-    xfree(p2m);
-}
-
-static int p2m_init_hostp2m(struct domain *d)
-{
-    struct p2m_domain *p2m = p2m_init_one(d);
-    int rc;
-
-    if ( !p2m )
-        return -ENOMEM;
-
-    rc = p2m_init_logdirty(p2m);
-
-    if ( !rc )
-        d->arch.p2m = p2m;
-    else
-        p2m_free_one(p2m);
-
-    return rc;
-}
-
-static void p2m_teardown_hostp2m(struct domain *d)
-{
-    /* Iterate over all p2m tables per domain */
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
-    if ( p2m )
-    {
-        p2m_free_one(p2m);
-        d->arch.p2m = NULL;
-    }
-}
-
-#ifdef CONFIG_HVM
-static void p2m_teardown_nestedp2m(struct domain *d)
-{
-    unsigned int i;
-    struct p2m_domain *p2m;
-
-    for ( i = 0; i < MAX_NESTEDP2M; i++ )
-    {
-        if ( !d->arch.nested_p2m[i] )
-            continue;
-        p2m = d->arch.nested_p2m[i];
-        list_del(&p2m->np2m_list);
-        p2m_free_one(p2m);
-        d->arch.nested_p2m[i] = NULL;
-    }
-}
-
-static int p2m_init_nestedp2m(struct domain *d)
-{
-    unsigned int i;
-    struct p2m_domain *p2m;
-
-    mm_lock_init(&d->arch.nested_p2m_lock);
-    for ( i = 0; i < MAX_NESTEDP2M; i++ )
-    {
-        d->arch.nested_p2m[i] = p2m = p2m_init_one(d);
-        if ( p2m == NULL )
-        {
-            p2m_teardown_nestedp2m(d);
-            return -ENOMEM;
-        }
-        p2m->p2m_class = p2m_nested;
-        p2m->write_p2m_entry_pre = NULL;
-        p2m->write_p2m_entry_post = nestedp2m_write_p2m_entry_post;
-        list_add(&p2m->np2m_list, &p2m_get_hostp2m(d)->np2m_list);
-    }
-
-    return 0;
-}
-
-static void p2m_teardown_altp2m(struct domain *d)
-{
-    unsigned int i;
-    struct p2m_domain *p2m;
-
-    for ( i = 0; i < MAX_ALTP2M; i++ )
-    {
-        if ( !d->arch.altp2m_p2m[i] )
-            continue;
-        p2m = d->arch.altp2m_p2m[i];
-        d->arch.altp2m_p2m[i] = NULL;
-        p2m_free_one(p2m);
-    }
-}
-
-static int p2m_init_altp2m(struct domain *d)
-{
-    unsigned int i;
-    struct p2m_domain *p2m;
-    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
-
-    mm_lock_init(&d->arch.altp2m_list_lock);
-    for ( i = 0; i < MAX_ALTP2M; i++ )
-    {
-        d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
-        if ( p2m == NULL )
-        {
-            p2m_teardown_altp2m(d);
-            return -ENOMEM;
-        }
-        p2m->p2m_class = p2m_alternate;
-        p2m->access_required = hostp2m->access_required;
-        _atomic_set(&p2m->active_vcpus, 0);
-    }
-
-    return 0;
-}
-#endif
-
-int p2m_init(struct domain *d)
-{
-    int rc;
-
-    rc = p2m_init_hostp2m(d);
-    if ( rc || !is_hvm_domain(d) )
-        return rc;
-
-#ifdef CONFIG_HVM
-    /* Must initialise nestedp2m unconditionally
-     * since nestedhvm_enabled(d) returns false here.
-     * (p2m_init runs too early for HVM_PARAM_* options) */
-    rc = p2m_init_nestedp2m(d);
-    if ( rc )
-    {
-        p2m_teardown_hostp2m(d);
-        return rc;
-    }
-
-    rc = p2m_init_altp2m(d);
-    if ( rc )
-    {
-        p2m_teardown_hostp2m(d);
-        p2m_teardown_nestedp2m(d);
-    }
-#endif
-
-    return rc;
-}
-
 int p2m_is_logdirty_range(struct p2m_domain *p2m, unsigned long start,
                           unsigned long end)
 {
@@ -298,8 +90,6 @@ int p2m_is_logdirty_range(struct p2m_domain *p2m, unsigned long start,
     return 0;
 }
 
-#ifdef CONFIG_HVM
-
 static void change_entry_type_global(struct p2m_domain *p2m,
                                      p2m_type_t ot, p2m_type_t nt)
 {
@@ -751,57 +541,6 @@ int p2m_alloc_table(struct p2m_domain *p2m)
     return 0;
 }
 
-#endif /* CONFIG_HVM */
-
-/*
- * hvm fixme: when adding support for pvh non-hardware domains, this path must
- * cleanup any foreign p2m types (release refcnts on them).
- */
-void p2m_teardown(struct p2m_domain *p2m)
-/* Return all the p2m pages to Xen.
- * We know we don't have any extra mappings to these pages */
-{
-#ifdef CONFIG_HVM
-    struct page_info *pg;
-#endif
-    struct domain *d;
-
-    if (p2m == NULL)
-        return;
-
-    d = p2m->domain;
-
-    p2m_lock(p2m);
-
-    ASSERT(atomic_read(&d->shr_pages) == 0);
-
-#ifdef CONFIG_HVM
-    p2m->phys_table = pagetable_null();
-
-    while ( (pg = page_list_remove_head(&p2m->pages)) )
-        d->arch.paging.free_page(d, pg);
-#endif
-
-    p2m_unlock(p2m);
-}
-
-void p2m_final_teardown(struct domain *d)
-{
-#ifdef CONFIG_HVM
-    /*
-     * We must teardown both of them unconditionally because
-     * we initialise them unconditionally.
-     */
-    p2m_teardown_altp2m(d);
-    p2m_teardown_nestedp2m(d);
-#endif
-
-    /* Iterate over all p2m tables per domain */
-    p2m_teardown_hostp2m(d);
-}
-
-#ifdef CONFIG_HVM
-
 static int __must_check
 p2m_remove_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
                  unsigned int page_order)
diff --git a/xen/arch/x86/mm/p2m.h b/xen/arch/x86/mm/p2m.h
index ec1d54f915..cc0f6766e4 100644
--- a/xen/arch/x86/mm/p2m.h
+++ b/xen/arch/x86/mm/p2m.h
@@ -15,8 +15,30 @@
  * along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
+struct p2m_domain *p2m_init_one(struct domain *d);
+void p2m_free_one(struct p2m_domain *p2m);
+
 void p2m_pod_init(struct p2m_domain *p2m);
 
+#ifdef CONFIG_HVM
+int p2m_init_logdirty(struct p2m_domain *p2m);
+void p2m_free_logdirty(struct p2m_domain *p2m);
+#else
+static inline int p2m_init_logdirty(struct p2m_domain *p2m) { return 0; }
+static inline void p2m_free_logdirty(struct p2m_domain *p2m) {}
+#endif
+
+int p2m_init_altp2m(struct domain *d);
+void p2m_teardown_altp2m(struct domain *d);
+
+void p2m_nestedp2m_init(struct p2m_domain *p2m);
+int p2m_init_nestedp2m(struct domain *d);
+void p2m_teardown_nestedp2m(struct domain *d);
+
+int ept_p2m_init(struct p2m_domain *p2m);
+void ept_p2m_uninit(struct p2m_domain *p2m);
+void p2m_init_altp2m_ept(struct domain *d, unsigned int i);
+
 /*
  * Local variables:
  * mode: C
--
generated by git-patchbot for /home/xen/git/xen.git#staging



 

