[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH Altp2m cleanup v5 3/3] Make altp2m domain dynamically allocated.



Make the altp2m domain state dynamically allocated, based on Ravi Sahita's
original patch. Introduce the set_altp2m_active() and altp2m_active() APIs.

Signed-off-by: Ravi Sahita <ravi.sahita@xxxxxxxxx>
Signed-off-by: Paul Lai <paul.c.lai@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c        |  8 +++---
 xen/arch/x86/hvm/vmx/vmx.c    |  2 +-
 xen/arch/x86/mm/altp2m.c      | 16 +++++------
 xen/arch/x86/mm/mem_sharing.c |  2 +-
 xen/arch/x86/mm/mm-locks.h    |  4 +--
 xen/arch/x86/mm/p2m-ept.c     | 10 +++----
 xen/arch/x86/mm/p2m.c         | 63 ++++++++++++++++++++++++-------------------
 xen/common/monitor.c          |  1 +
 xen/include/asm-x86/altp2m.h  |  7 ++++-
 xen/include/asm-x86/domain.h  |  6 ++---
 xen/include/asm-x86/p2m.h     |  9 ++++++-
 11 files changed, 73 insertions(+), 55 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 1bf2d01..ac692ab 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5259,7 +5259,7 @@ static int do_altp2m_op(
 
     if ( (a.cmd != HVMOP_altp2m_get_domain_state) &&
          (a.cmd != HVMOP_altp2m_set_domain_state) &&
-         !d->arch.altp2m_active )
+         !altp2m_active(d) )
     {
         rc = -EOPNOTSUPP;
         goto out;
@@ -5293,11 +5293,11 @@ static int do_altp2m_op(
             break;
         }
 
-        ostate = d->arch.altp2m_active;
-        d->arch.altp2m_active = !!a.u.domain_state.state;
+        ostate = altp2m_active(d);
+        set_altp2m_active(d, a.u.domain_state.state);
 
         /* If the alternate p2m state has changed, handle appropriately */
-        if ( d->arch.altp2m_active != ostate &&
+        if ( altp2m_active(d) != ostate &&
              (ostate || !(rc = p2m_init_altp2m_by_id(d, 0))) )
         {
             for_each_vcpu( d, v )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index bb7a329..699e9b1 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2018,7 +2018,7 @@ static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
     {
         v->arch.hvm_vmx.secondary_exec_control |= mask;
         __vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
-        __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
+        __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m->eptp));
 
         if ( cpu_has_vmx_virt_exceptions )
         {
diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
index 62801ae..7917a1e 100644
--- a/xen/arch/x86/mm/altp2m.c
+++ b/xen/arch/x86/mm/altp2m.c
@@ -87,20 +87,20 @@ altp2m_domain_init(struct domain *d)
         return 0;
 
     /* Init alternate p2m data. */
-    if ( (d->arch.altp2m_eptp = alloc_xenheap_page()) == NULL )
+    if ( (d->arch.altp2m->eptp = alloc_xenheap_page()) == NULL )
         return -ENOMEM;
 
     for ( i = 0; i < MAX_EPTP; i++ )
-        d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
+        d->arch.altp2m->eptp[i] = mfn_x(INVALID_MFN);
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        rc = p2m_alloc_table(d->arch.altp2m_p2m[i]);
+        rc = p2m_alloc_table(d->arch.altp2m->p2m[i]);
         if ( rc != 0 )
            return rc;
     }
 
-    d->arch.altp2m_active = 0;
+    set_altp2m_active(d, false);
 
     return rc;
 }
@@ -113,13 +113,13 @@ altp2m_domain_teardown(struct domain *d)
     if ( !hvm_altp2m_supported() )
         return;
 
-    d->arch.altp2m_active = 0;
+    set_altp2m_active(d, false);
 
-    free_xenheap_page(d->arch.altp2m_eptp);
-    d->arch.altp2m_eptp = NULL;
+    free_xenheap_page(d->arch.altp2m->eptp);
+    d->arch.altp2m->eptp = NULL;
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
-        p2m_teardown(d->arch.altp2m_p2m[i]);
+        p2m_teardown(d->arch.altp2m->p2m[i]);
 }
 
 /*
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index e7c6b74..3fd8380 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -877,7 +877,7 @@ int mem_sharing_nominate_page(struct domain *d,
 
         for ( i = 0; i < MAX_ALTP2M; i++ )
         {
-            ap2m = d->arch.altp2m_p2m[i];
+            ap2m = d->arch.altp2m->p2m[i];
             if ( !ap2m )
                 continue;
 
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 74fdfc1..71d3891 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -266,8 +266,8 @@ declare_mm_order_constraint(per_page_sharing)
  */
 
 declare_mm_lock(altp2mlist)
-#define altp2m_list_lock(d)   mm_lock(altp2mlist, &(d)->arch.altp2m_list_lock)
-#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m_list_lock)
+#define altp2m_list_lock(d)   mm_lock(altp2mlist, &(d)->arch.altp2m->list_lock)
+#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m->list_lock)
 
 /* P2M lock (per-altp2m-table)
  *
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 04878f5..9459d87 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1331,14 +1331,14 @@ void setup_ept_dump(void)
 
 void p2m_init_altp2m_ept(struct domain *d, unsigned int i)
 {
-    struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
+    struct p2m_domain *p2m = d->arch.altp2m->p2m[i];
     struct ept_data *ept;
 
     p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
-    p2m->max_remapped_gfn = 0;
+    p2m->max_remapped_gfn = gfn_x(_gfn(0UL));
     ept = &p2m->ept;
     ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
-    d->arch.altp2m_eptp[i] = ept_get_eptp(ept);
+    d->arch.altp2m->eptp[i] = ept_get_eptp(ept);
 }
 
 unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
@@ -1351,10 +1351,10 @@ unsigned int p2m_find_altp2m_by_eptp(struct domain *d, 
uint64_t eptp)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+        if ( d->arch.altp2m->eptp[i] == mfn_x(INVALID_MFN) )
             continue;
 
-        p2m = d->arch.altp2m_p2m[i];
+        p2m = d->arch.altp2m->p2m[i];
         ept = &p2m->ept;
 
         if ( eptp == ept_get_eptp(ept) )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index fa1ad4a..970a56c 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -191,14 +191,17 @@ static void p2m_teardown_altp2m(struct domain *d)
     unsigned int i;
     struct p2m_domain *p2m;
 
+    if ( d->arch.altp2m == NULL )
+       return;
+
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( !d->arch.altp2m_p2m[i] )
-            continue;
-        p2m = d->arch.altp2m_p2m[i];
+        p2m = d->arch.altp2m->p2m[i];
         p2m_free_one(p2m);
-        d->arch.altp2m_p2m[i] = NULL;
+        d->arch.altp2m->p2m[i] = NULL;
     }
+
+    xfree(d->arch.altp2m);
 }
 
 static int p2m_init_altp2m(struct domain *d)
@@ -206,10 +209,14 @@ static int p2m_init_altp2m(struct domain *d)
     unsigned int i;
     struct p2m_domain *p2m;
 
-    mm_lock_init(&d->arch.altp2m_list_lock);
+    d->arch.altp2m = xzalloc(struct altp2m_domain);
+    if ( d->arch.altp2m == NULL )
+         return -ENOMEM;
+
+    mm_lock_init(&d->arch.altp2m->list_lock);
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
+        d->arch.altp2m->p2m[i] = p2m = p2m_init_one(d);
         if ( p2m == NULL )
         {
             p2m_teardown_altp2m(d);
@@ -1845,10 +1852,10 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, 
uint32_t nr,
     if ( altp2m_idx )
     {
         if ( altp2m_idx >= MAX_ALTP2M ||
-             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
+             d->arch.altp2m->eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
             return -EINVAL;
 
-        ap2m = d->arch.altp2m_p2m[altp2m_idx];
+        ap2m = d->arch.altp2m->p2m[altp2m_idx];
     }
 
     switch ( access )
@@ -2289,7 +2296,7 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, 
unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
+    if ( d->arch.altp2m->eptp[idx] != mfn_x(INVALID_MFN) )
     {
         if ( idx != vcpu_altp2m(v).p2midx )
         {
@@ -2374,11 +2381,11 @@ void p2m_flush_altp2m(struct domain *d)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        p2m_flush_table(d->arch.altp2m_p2m[i]);
+        p2m_flush_table(d->arch.altp2m->p2m[i]);
         /* Uninit and reinit ept to force TLB shootdown */
-        ept_p2m_uninit(d->arch.altp2m_p2m[i]);
-        ept_p2m_init(d->arch.altp2m_p2m[i]);
-        d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
+        ept_p2m_uninit(d->arch.altp2m->p2m[i]);
+        ept_p2m_init(d->arch.altp2m->p2m[i]);
+        d->arch.altp2m->eptp[i] = mfn_x(INVALID_MFN);
     }
 
     altp2m_list_unlock(d);
@@ -2393,7 +2400,7 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int 
idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
+    if ( d->arch.altp2m->eptp[idx] == mfn_x(INVALID_MFN) )
     {
         p2m_init_altp2m_ept(d, idx);
         rc = 0;
@@ -2412,7 +2419,7 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
+        if ( d->arch.altp2m->eptp[i] != mfn_x(INVALID_MFN) )
             continue;
 
         p2m_init_altp2m_ept(d, i);
@@ -2438,17 +2445,17 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned 
int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
+    if ( d->arch.altp2m->eptp[idx] != mfn_x(INVALID_MFN) )
     {
-        p2m = d->arch.altp2m_p2m[idx];
+        p2m = d->arch.altp2m->p2m[idx];
 
         if ( !_atomic_read(p2m->active_vcpus) )
         {
-            p2m_flush_table(d->arch.altp2m_p2m[idx]);
+            p2m_flush_table(d->arch.altp2m->p2m[idx]);
             /* Uninit and reinit ept to force TLB shootdown */
-            ept_p2m_uninit(d->arch.altp2m_p2m[idx]);
-            ept_p2m_init(d->arch.altp2m_p2m[idx]);
-            d->arch.altp2m_eptp[idx] = mfn_x(INVALID_MFN);
+            ept_p2m_uninit(d->arch.altp2m->p2m[idx]);
+            ept_p2m_init(d->arch.altp2m->p2m[idx]);
+            d->arch.altp2m->eptp[idx] = mfn_x(INVALID_MFN);
             rc = 0;
         }
     }
@@ -2472,7 +2479,7 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, 
unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
+    if ( d->arch.altp2m->eptp[idx] != mfn_x(INVALID_MFN) )
     {
         for_each_vcpu( d, v )
             if ( idx != vcpu_altp2m(v).p2midx )
@@ -2503,11 +2510,11 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned 
int idx,
     unsigned int page_order;
     int rc = -EINVAL;
 
-    if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
+    if ( idx >= MAX_ALTP2M || d->arch.altp2m->eptp[idx] == mfn_x(INVALID_MFN) )
         return rc;
 
     hp2m = p2m_get_hostp2m(d);
-    ap2m = d->arch.altp2m_p2m[idx];
+    ap2m = d->arch.altp2m->p2m[idx];
 
     p2m_lock(ap2m);
 
@@ -2599,10 +2606,10 @@ void p2m_altp2m_propagate_change(struct domain *d, 
gfn_t gfn,
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+        if ( d->arch.altp2m->eptp[i] == mfn_x(INVALID_MFN) )
             continue;
 
-        p2m = d->arch.altp2m_p2m[i];
+        p2m = d->arch.altp2m->p2m[i];
         m = get_gfn_type_access(p2m, gfn_x(gfn), &t, &a, 0, NULL);
 
         /* Check for a dropped page that may impact this altp2m */
@@ -2623,10 +2630,10 @@ void p2m_altp2m_propagate_change(struct domain *d, 
gfn_t gfn,
                 for ( i = 0; i < MAX_ALTP2M; i++ )
                 {
                     if ( i == last_reset_idx ||
-                         d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+                         d->arch.altp2m->eptp[i] == mfn_x(INVALID_MFN) )
                         continue;
 
-                    p2m = d->arch.altp2m_p2m[i];
+                    p2m = d->arch.altp2m->p2m[i];
                     p2m_lock(p2m);
                     p2m_reset_altp2m(p2m);
                     p2m_unlock(p2m);
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index c73d1d5..d6b3f90 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -24,6 +24,7 @@
 #include <xen/sched.h>
 #include <xen/vm_event.h>
 #include <xsm/xsm.h>
+#include <asm/p2m.h>
 #include <asm/altp2m.h>
 #include <asm/monitor.h>
 #include <asm/vm_event.h>
diff --git a/xen/include/asm-x86/altp2m.h b/xen/include/asm-x86/altp2m.h
index 0090c89..65a7ead 100644
--- a/xen/include/asm-x86/altp2m.h
+++ b/xen/include/asm-x86/altp2m.h
@@ -24,7 +24,12 @@
 /* Alternate p2m HVM on/off per domain */
 static inline bool_t altp2m_active(const struct domain *d)
 {
-    return d->arch.altp2m_active;
+    return d->arch.altp2m->active;
+}
+
+static inline void set_altp2m_active(const struct domain *d, bool_t v)
+{
+    d->arch.altp2m->active = v;
 }
 
 /* Alternate p2m VCPU */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 5807a1f..3b9fcc1 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -242,6 +242,7 @@ typedef xen_domctl_cpuid_t cpuid_input_t;
 #define INVALID_ALTP2M  0xffff
 #define MAX_EPTP        (PAGE_SIZE / sizeof(uint64_t))
 struct p2m_domain;
+struct altp2m_domain;
 struct time_scale {
     int shift;
     u32 mul_frac;
@@ -320,10 +321,7 @@ struct arch_domain
     mm_lock_t nested_p2m_lock;
 
     /* altp2m: allow multiple copies of host p2m */
-    bool_t altp2m_active;
-    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
-    mm_lock_t altp2m_list_lock;
-    uint64_t *altp2m_eptp;
+    struct altp2m_domain *altp2m;
 
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     struct radix_tree_root irq_pirq;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 0c5b391..cf30924 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -338,6 +338,13 @@ struct p2m_domain {
     };
 };
 
+struct altp2m_domain {
+    bool_t active;
+    struct p2m_domain *p2m[MAX_ALTP2M];
+    mm_lock_t list_lock;
+    uint64_t *eptp;
+};
+
 /* get host p2m table */
 #define p2m_get_hostp2m(d)      ((d)->arch.p2m)
 
@@ -778,7 +785,7 @@ static inline struct p2m_domain *p2m_get_altp2m(struct vcpu 
*v)
 
     BUG_ON(index >= MAX_ALTP2M);
 
-    return v->domain->arch.altp2m_p2m[index];
+    return v->domain->arch.altp2m->p2m[index];
 }
 
 /* Switch alternate p2m for a single vcpu */
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.