
[Xen-devel] [PATCH 04/18] arm/altp2m: Add altp2m init/teardown routines.



The p2m initialization now invokes routines responsible for the
allocation and initialization of the altp2m structures. The same
applies to the teardown routines. The functionality has been adapted
from the x86 altp2m implementation.

Signed-off-by: Sergej Proskurin <proskurin@xxxxxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
---
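
For reviewers, a condensed sketch of the init/teardown ordering this
patch establishes (error handling abbreviated; this mirrors the code
below rather than adding anything new):

    int p2m_init(struct domain *d)
    {
        int rc = p2m_init_hostp2m(d);   /* allocates the domain's VMID */

        if ( rc )
            return rc;

        rc = p2m_init_altp2m(d);        /* MAX_ALTP2M views, sharing that VMID */
        if ( rc )
            p2m_teardown_hostp2m(d);    /* unwind the host p2m on failure */

        return rc;
    }

    void p2m_teardown(struct domain *d)
    {
        /* altp2m is initialised unconditionally, so tear it down first. */
        p2m_teardown_altp2m(d);
        p2m_teardown_hostp2m(d);
    }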
 xen/arch/arm/p2m.c            | 166 ++++++++++++++++++++++++++++++++++++++++--
 xen/include/asm-arm/domain.h  |   6 ++
 xen/include/asm-arm/hvm/hvm.h |  12 +++
 xen/include/asm-arm/p2m.h     |  20 +++++
 4 files changed, 198 insertions(+), 6 deletions(-)
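
As a side note on the new struct vttbr_data: a minimal stand-alone
illustration of the field packing (a user-space toy assuming GCC
bit-field layout; the VMID value is illustrative only):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct vttbr_data {
        union {
            struct {
                uint64_t vttbr_baddr :40,  /* root table base address */
                         res1        :8,
                         vttbr_vmid  :8,   /* VMID, bits [55:48] */
                         res2        :8;
            };
            uint64_t vttbr;
        };
    };

    int main(void)
    {
        struct vttbr_data v = { .vttbr = 0 };

        v.vttbr_vmid = 0x2a;
        /* Prints 0x002a000000000000: the VMID lands in bits [55:48]. */
        printf("VTTBR with VMID 0x2a: 0x%016" PRIx64 "\n", v.vttbr);

        return 0;
    }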

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index aa4e774..e72ca7a 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1400,19 +1400,103 @@ static void p2m_free_vmid(struct domain *d)
     spin_unlock(&vmid_alloc_lock);
 }
 
-void p2m_teardown(struct domain *d)
+static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
 {
-    struct p2m_domain *p2m = &d->arch.p2m;
+    int ret = 0;
+
+    spin_lock_init(&p2m->lock);
+    INIT_PAGE_LIST_HEAD(&p2m->pages);
+
+    spin_lock(&p2m->lock);
+
+    p2m->domain = d;
+    p2m->access_required = false;
+    p2m->mem_access_enabled = false;
+    p2m->default_access = p2m_access_rwx;
+    p2m->p2m_class = p2m_host;
+    p2m->root = NULL;
+
+    /* Adopt VMID of the associated domain */
+    p2m->vmid = d->arch.p2m.vmid;
+    p2m->vttbr.vttbr = 0;
+    p2m->vttbr.vttbr_vmid = p2m->vmid;
+
+    p2m->max_mapped_gfn = 0;
+    p2m->lowest_mapped_gfn = ULONG_MAX;
+    radix_tree_init(&p2m->mem_access_settings);
+
+    spin_unlock(&p2m->lock);
+
+    return ret;
+}
+
+static void p2m_free_one(struct p2m_domain *p2m)
+{
+    mfn_t mfn;
+    unsigned int i;
     struct page_info *pg;
 
     spin_lock(&p2m->lock);
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )
-        free_domheap_page(pg);
+        if ( pg != p2m->root )
+            free_domheap_page(pg);
+
+    for ( i = 0; i < P2M_ROOT_PAGES; i++ )
+    {
+        mfn = _mfn(page_to_mfn(p2m->root) + i);
+        clear_domain_page(mfn);
+    }
+    free_domheap_pages(p2m->root, P2M_ROOT_ORDER);
+    p2m->root = NULL;
+
+    radix_tree_destroy(&p2m->mem_access_settings, NULL);
+
+    spin_unlock(&p2m->lock);
+
+    xfree(p2m);
+}
+
+static struct p2m_domain *p2m_init_one(struct domain *d)
+{
+    struct p2m_domain *p2m = xzalloc(struct p2m_domain);
+
+    if ( !p2m )
+        return NULL;
+
+    if ( p2m_initialise(d, p2m) )
+        goto free_p2m;
+
+    return p2m;
+
+free_p2m:
+    xfree(p2m);
+    return NULL;
+}
+
+static void p2m_teardown_hostp2m(struct domain *d)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    struct page_info *pg = NULL;
+    mfn_t mfn;
+    unsigned int i;
+
+    spin_lock(&p2m->lock);
 
-    if ( p2m->root )
-        free_domheap_pages(p2m->root, P2M_ROOT_ORDER);
+    while ( (pg = page_list_remove_head(&p2m->pages)) )
+        if ( pg != p2m->root )
+        {
+            mfn = _mfn(page_to_mfn(pg));
+            clear_domain_page(mfn);
+            free_domheap_page(pg);
+        }
 
+    for ( i = 0; i < P2M_ROOT_PAGES; i++ )
+    {
+        mfn = _mfn(page_to_mfn(p2m->root) + i);
+        clear_domain_page(mfn);
+    }
+    free_domheap_pages(p2m->root, P2M_ROOT_ORDER);
     p2m->root = NULL;
 
     p2m_free_vmid(d);
@@ -1422,7 +1506,7 @@ void p2m_teardown(struct domain *d)
     spin_unlock(&p2m->lock);
 }
 
-int p2m_init(struct domain *d)
+static int p2m_init_hostp2m(struct domain *d)
 {
     struct p2m_domain *p2m = &d->arch.p2m;
     int rc = 0;
@@ -1437,6 +1521,8 @@ int p2m_init(struct domain *d)
     if ( rc != 0 )
         goto err;
 
+    p2m->vttbr.vttbr_vmid = p2m->vmid;
+
     d->arch.vttbr = 0;
 
     p2m->root = NULL;
@@ -1454,6 +1540,74 @@ err:
     return rc;
 }
 
+static void p2m_teardown_altp2m(struct domain *d)
+{
+    unsigned int i;
+    struct p2m_domain *p2m;
+
+    for ( i = 0; i < MAX_ALTP2M; i++ )
+    {
+        if ( !d->arch.altp2m_p2m[i] )
+            continue;
+
+        p2m = d->arch.altp2m_p2m[i];
+        p2m_free_one(p2m);
+        d->arch.altp2m_vttbr[i] = INVALID_MFN;
+        d->arch.altp2m_p2m[i] = NULL;
+    }
+
+    d->arch.altp2m_active = false;
+}
+
+static int p2m_init_altp2m(struct domain *d)
+{
+    unsigned int i;
+    struct p2m_domain *p2m;
+
+    spin_lock_init(&d->arch.altp2m_lock);
+    for ( i = 0; i < MAX_ALTP2M; i++ )
+    {
+        d->arch.altp2m_vttbr[i] = INVALID_MFN;
+        d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
+        if ( p2m == NULL )
+        {
+            p2m_teardown_altp2m(d);
+            return -ENOMEM;
+        }
+        p2m->p2m_class = p2m_alternate;
+        p2m->access_required = 1;
+        _atomic_set(&p2m->active_vcpus, 0);
+    }
+
+    return 0;
+}
+
+void p2m_teardown(struct domain *d)
+{
+    /*
+     * We must tear down altp2m unconditionally because
+     * we initialise it unconditionally.
+     */
+    p2m_teardown_altp2m(d);
+
+    p2m_teardown_hostp2m(d);
+}
+
+int p2m_init(struct domain *d)
+{
+    int rc = 0;
+
+    rc = p2m_init_hostp2m(d);
+    if ( rc )
+        return rc;
+
+    rc = p2m_init_altp2m(d);
+    if ( rc )
+        p2m_teardown_hostp2m(d);
+
+    return rc;
+}
+
 int relinquish_p2m_mapping(struct domain *d)
 {
     struct p2m_domain *p2m = &d->arch.p2m;
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 2039f16..6b9770f 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -29,6 +29,9 @@ enum domain_type {
 #define is_64bit_domain(d) (0)
 #endif
 
+#define MAX_ALTP2M      10 /* arbitrary */
+#define INVALID_ALTP2M  0xffff
+
 extern int dom0_11_mapping;
 #define is_domain_direct_mapped(d) ((d) == hardware_domain && dom0_11_mapping)
 
@@ -130,6 +133,9 @@ struct arch_domain
 
     /* altp2m: allow multiple copies of host p2m */
     bool_t altp2m_active;
+    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
+    spinlock_t altp2m_lock;
+    uint64_t altp2m_vttbr[MAX_ALTP2M];
 }  __cacheline_aligned;
 
 struct arch_vcpu
diff --git a/xen/include/asm-arm/hvm/hvm.h b/xen/include/asm-arm/hvm/hvm.h
index 96c455c..28d5298 100644
--- a/xen/include/asm-arm/hvm/hvm.h
+++ b/xen/include/asm-arm/hvm/hvm.h
@@ -19,6 +19,18 @@
 #ifndef __ASM_ARM_HVM_HVM_H__
 #define __ASM_ARM_HVM_HVM_H__
 
+struct vttbr_data {
+    union {
+        struct {
+            u64 vttbr_baddr :40, /* bits [x-1:0] res0; x depends on root table alignment */
+                res1        :8,
+                vttbr_vmid  :8,
+                res2        :8;
+        };
+        u64 vttbr;
+    };
+};
+
 struct hvm_function_table {
     char *name;
 
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 0d1e61e..a78d547 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -8,6 +8,9 @@
 #include <xen/p2m-common.h>
 #include <public/memory.h>
 
+#include <asm/atomic.h>
+#include <asm/hvm/hvm.h>
+
 #define paddr_bits PADDR_BITS
 
 /* Holds the bit size of IPAs in p2m tables.  */
@@ -17,6 +20,11 @@ struct domain;
 
 extern void memory_type_changed(struct domain *);
 
+typedef enum {
+    p2m_host,
+    p2m_alternate,
+} p2m_class_t;
+
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
@@ -66,6 +74,18 @@ struct p2m_domain {
     /* Radix tree to store the p2m_access_t settings as the pte's don't have
      * enough available bits to store this information. */
     struct radix_tree_root mem_access_settings;
+
+    /* Alternate p2m: count of vcpu's currently using this p2m. */
+    atomic_t active_vcpus;
+
+    /* Choose between: host/alternate */
+    p2m_class_t p2m_class;
+
+    /* Back pointer to domain */
+    struct domain *domain;
+
+    /* VTTBR information */
+    struct vttbr_data vttbr;
 };
 
 /* List of possible type for each page in the p2m entry.
-- 
2.8.3

