[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC PATCH v6 23/43] arm/altp2m: Add support for altp2m init/teardown routines



From: Rose Spangler <Rose.Spangler@xxxxxxxxxxxxxx>

The p2m initialization now invokes initialization routines responsible for
the allocation and initialization of altp2m structures. The same applies to
teardown routines.

This commit adds the prerequisites for the common altp2m_init and
altp2m_teardown functions to work on ARM; no further changes were necessary
other than removing the CONFIG_X86 gating.

This is commit 12/12 of the altp2m_init/altp2m_teardown routines phase.

Signed-off-by: Rose Spangler <Rose.Spangler@xxxxxxxxxxxxxx>
Signed-off-by: Sergej Proskurin <proskurin@xxxxxxxxxxxxx>
Signed-off-by: Aqib Javaid <Aqib.Javaid@xxxxxxxxxxxxxx>
---
v2: Shared code between host/altp2m init/teardown functions.
    Added conditional init/teardown of altp2m.
    Altp2m related functions are moved to altp2m.c

v3: Removed locking the altp2m_lock in altp2m_teardown. Locking this
    lock at this point is unnecessary.

    Removed re-setting altp2m_vttbr, altp2m_p2m, and altp2m_active
    values in the function "altp2m_teardown". Re-setting these values is
    unnecessary as the entire domain will be destroyed right afterwards.

    Removed check for "altp2m_enabled" in "p2m_init" as altp2m has not yet
    been enabled by libxl at this point.

    Removed check for "altp2m_enabled" before tearing down altp2m within
    the function "p2m_teardown" so that altp2m gets destroyed even if
    the HVM_PARAM_ALTP2M gets reset before "p2m_teardown" is called.

    Added initialization of the field d->arch.altp2m_active in
    "altp2m_init".

    Removed the check for already initialized VMIDs in "altp2m_init_one",
    as "altp2m_init_one" is now always called with an uninitialized p2m.

    Removed the array altp2m_vttbr[] in struct arch_domain.

v4: Removed initialization of altp2m_p2m[] to NULL in altp2m_init, as
    the "struct arch_domain" is already initialized to zero.

    We moved the definition of the macro MAX_ALTP2M to a common place in
    a separate commit.

v6: Reworked to use common altp2m init and teardown routines.

    Added altp2m_lock_init macro for use in altp2m_init.

    Split altp2m initialization in p2m_init into a separate function,
    p2m_init_altp2m, to more easily gate code behind CONFIG_ALTP2M.

    Pulled in addition of active_vcpus from a later patch in the patch
    series.

    Split teardown and free of altp2m views into p2m_teardown and
    p2m_teardown_final (part of altp2m_teardown), respectively.
---
 xen/arch/arm/include/asm/altp2m.h |  4 +++
 xen/arch/arm/include/asm/domain.h |  8 ++++++
 xen/arch/arm/include/asm/p2m.h    |  5 ++++
 xen/arch/arm/mmu/p2m.c            | 44 ++++++++++++++++++++++++++++++-
 xen/common/altp2m.c               |  2 --
 xen/include/xen/altp2m.h          |  2 --
 6 files changed, 60 insertions(+), 5 deletions(-)

diff --git a/xen/arch/arm/include/asm/altp2m.h 
b/xen/arch/arm/include/asm/altp2m.h
index 698c35427e75..5a217f48b103 100644
--- a/xen/arch/arm/include/asm/altp2m.h
+++ b/xen/arch/arm/include/asm/altp2m.h
@@ -19,6 +19,10 @@ static inline bool altp2m_supported(void)
     return true;
 }
 
+#define altp2m_lock_init(d) spin_lock_init(&(d)->arch.altp2m_lock)
+#define altp2m_lock(d)      spin_lock(&(d)->arch.altp2m_lock)
+#define altp2m_unlock(d)    spin_unlock(&(d)->arch.altp2m_lock)
+
 /* Alternate p2m VCPU */
 static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v)
 {
diff --git a/xen/arch/arm/include/asm/domain.h 
b/xen/arch/arm/include/asm/domain.h
index 576dbdec20af..9e69d62086cd 100644
--- a/xen/arch/arm/include/asm/domain.h
+++ b/xen/arch/arm/include/asm/domain.h
@@ -128,6 +128,14 @@ struct arch_domain
 #endif
 
     struct resume_info resume_ctx;
+
+#ifdef CONFIG_ALTP2M
+    /*
+     * Lock that protects critical altp2m operations that must not be performed
+     * concurrently.
+     */
+    spinlock_t altp2m_lock;
+#endif
 }  __cacheline_aligned;
 
 struct arch_vcpu
diff --git a/xen/arch/arm/include/asm/p2m.h b/xen/arch/arm/include/asm/p2m.h
index 8ae0cd7ff589..db715c4f8bfc 100644
--- a/xen/arch/arm/include/asm/p2m.h
+++ b/xen/arch/arm/include/asm/p2m.h
@@ -9,6 +9,8 @@
 #include <asm/current.h>
 #include <asm/hsr.h>
 
+#include <asm/atomic.h>
+
 #define paddr_bits PADDR_BITS
 
 /* Holds the bit size of IPAs in p2m tables.  */
@@ -117,6 +119,9 @@ struct p2m_domain {
     /* Keeping track on which CPU this p2m was used and for which vCPU */
     uint8_t last_vcpu_ran[NR_CPUS];
 
+    /* Alternate p2m: count of vcpu's currently using this p2m. */
+    atomic_t active_vcpus;
+
     /* Choose between: host/alternate. */
     p2m_class_t p2m_class;
 };
diff --git a/xen/arch/arm/mmu/p2m.c b/xen/arch/arm/mmu/p2m.c
index 0d37760ef5d5..04d17e787259 100644
--- a/xen/arch/arm/mmu/p2m.c
+++ b/xen/arch/arm/mmu/p2m.c
@@ -1,4 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
+#include <xen/altp2m.h>
 #include <xen/cpu.h>
 #include <xen/domain_page.h>
 #include <xen/ioreq.h>
@@ -7,6 +8,7 @@
 #include <xen/softirq.h>
 #include <xen/xmalloc.h>
 
+#include <asm/altp2m.h>
 #include <asm/alternative.h>
 #include <asm/event.h>
 #include <asm/flushtlb.h>
@@ -1486,6 +1488,20 @@ int p2m_teardown(struct domain *d)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
+#ifdef CONFIG_ALTP2M
+    unsigned int i;
+    int rc;
+
+    d->altp2m_active = false;
+
+    for ( i = 0; i < d->nr_altp2m; i++ )
+    {
+        rc = p2m_teardown_one(d->altp2m_p2m[i]);
+        if ( rc )
+            return rc;
+    }
+#endif
+
     return p2m_teardown_one(p2m);
 }
 
@@ -1499,6 +1515,9 @@ void p2m_final_teardown(struct domain *d)
      * where relinquish_p2m_mapping() has been called.
      */
 
+    if ( altp2m_supported() )
+        altp2m_teardown(d);
+
     while ( p2m_teardown_allocation(d) == -ERESTART )
         continue; /* No preemption support here */
     ASSERT(page_list_empty(&d->arch.paging.p2m_freelist));
@@ -1595,6 +1614,24 @@ struct p2m_domain *p2m_init_one(struct domain *d)
     return NULL;
 }
 
+static int p2m_init_altp2m(struct domain *d)
+{
+#ifdef CONFIG_ALTP2M
+    int rc;
+
+    rc = altp2m_init(d);
+    if ( rc )
+    {
+        p2m_free_one(p2m_get_hostp2m(d));
+        return rc;
+    }
+
+    d->altp2m_active = false;
+#endif
+
+    return 0;
+}
+
 static int p2m_init_hostp2m(struct domain *d)
 {
     struct p2m_domain *p2m = p2m_init_one(d);
@@ -1608,10 +1645,15 @@ static int p2m_init_hostp2m(struct domain *d)
 
 int p2m_init(struct domain *d)
 {
+    int rc;
     spin_lock_init(&d->arch.paging.lock);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.p2m_freelist);
 
-    return p2m_init_hostp2m(d);
+    rc = p2m_init_hostp2m(d);
+    if ( rc )
+        return rc;
+
+    return p2m_init_altp2m(d);
 }
 
 /*
diff --git a/xen/common/altp2m.c b/xen/common/altp2m.c
index cda653b713f0..989d8bdcb923 100644
--- a/xen/common/altp2m.c
+++ b/xen/common/altp2m.c
@@ -15,7 +15,6 @@
 #include <asm/hvm/nestedhvm.h>
 #endif
 
-#if CONFIG_X86
 int altp2m_init(struct domain *d)
 {
     unsigned int i;
@@ -60,7 +59,6 @@ void altp2m_teardown(struct domain *d)
 
     XVFREE(d->altp2m_p2m);
 }
-#endif /* CONFIG_X86 */
 
 /*
  * altp2m operations are envisioned as being used in several different
diff --git a/xen/include/xen/altp2m.h b/xen/include/xen/altp2m.h
index 85ef22c2b29e..238c7a935586 100644
--- a/xen/include/xen/altp2m.h
+++ b/xen/include/xen/altp2m.h
@@ -13,13 +13,11 @@
  * regardless of CONFIG_ALTP2M
  */
 
-#ifdef CONFIG_X86
 /* Initialize altp2m views */
 int altp2m_init(struct domain *d);
 
 /* Free altp2m views */
 void altp2m_teardown(struct domain *d);
-#endif
 
 #ifdef CONFIG_ALTP2M
 
-- 
2.34.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.