
[Xen-changelog] [xen-unstable] x86/p2m: allocate CPU masks dynamically



# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1319182967 -7200
# Node ID 2682094bc243f96d1187271595ecefee333ec11d
# Parent  253073b522f8fcde6de44db9955c89257cde84d8
x86/p2m: allocate CPU masks dynamically

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---
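For context (this note and sketch are not part of the committed patch): the change below replaces the cpumask_t embedded in struct p2m_domain, which is always NR_CPUS bits wide, with a dynamically allocated cpumask_var_t. A minimal sketch of the resulting allocate/use/free pattern, assuming only the zalloc_cpumask_var()/free_cpumask_var()/cpumask_set_cpu() helpers already used in the hunks below; the struct and function names here are purely illustrative:

    #include <xen/cpumask.h>
    #include <xen/errno.h>

    /* Illustrative container -- stands in for struct p2m_domain. */
    struct demo {
        cpumask_var_t dirty_cpumask;    /* was: cpumask_t dirty_cpumask; */
    };

    static int demo_init(struct demo *d)
    {
        /* Allocate a zeroed mask; zalloc_cpumask_var() returns false
         * on allocation failure, so the caller must handle -ENOMEM. */
        if ( !zalloc_cpumask_var(&d->dirty_cpumask) )
            return -ENOMEM;
        cpumask_set_cpu(smp_processor_id(), d->dirty_cpumask);
        return 0;
    }

    static void demo_teardown(struct demo *d)
    {
        /* Pairs with zalloc_cpumask_var(). Note that allocation takes
         * the address of the cpumask_var_t, while every other use
         * (flush_tlb_mask, cpumask_clear, cpumask_set_cpu, free) passes
         * it directly, without '&', as in the patch below. */
        free_cpumask_var(d->dirty_cpumask);
    }

Because the mask allocation can now fail independently of the containing structure, the teardown paths in the patch also gain NULL checks on the p2m pointers before freeing the mask.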


--- 2011-10-18.orig/xen/arch/x86/hvm/nestedhvm.c        2011-10-11 17:24:46.000000000 +0200
+++ 2011-10-18/xen/arch/x86/hvm/nestedhvm.c     2011-10-18 16:45:02.000000000 +0200
@@ -114,9 +114,9 @@ nestedhvm_flushtlb_ipi(void *info)
 void
 nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m)
 {
-    on_selected_cpus(&p2m->p2m_dirty_cpumask, nestedhvm_flushtlb_ipi,
+    on_selected_cpus(p2m->dirty_cpumask, nestedhvm_flushtlb_ipi,
         p2m->domain, 1);
-    cpumask_clear(&p2m->p2m_dirty_cpumask);
+    cpumask_clear(p2m->dirty_cpumask);
 }

 bool_t
--- 2011-10-18.orig/xen/arch/x86/mm/hap/nested_hap.c    2011-10-21 09:24:51.000000000 +0200
+++ 2011-10-18/xen/arch/x86/mm/hap/nested_hap.c 2011-10-18 16:44:35.000000000 +0200
@@ -88,7 +88,7 @@ nestedp2m_write_p2m_entry(struct p2m_dom
     safe_write_pte(p, new);

     if (old_flags & _PAGE_PRESENT)
-        flush_tlb_mask(&p2m->p2m_dirty_cpumask);
+        flush_tlb_mask(p2m->dirty_cpumask);

     paging_unlock(d);
 }
--- 2011-10-18.orig/xen/arch/x86/mm/p2m.c       2011-10-14 09:47:46.000000000 +0200
+++ 2011-10-18/xen/arch/x86/mm/p2m.c    2011-10-21 09:28:33.000000000 +0200
@@ -81,7 +81,6 @@ static void p2m_initialise(struct domain
     p2m->default_access = p2m_access_rwx;

     p2m->cr3 = CR3_EADDR;
-    cpumask_clear(&p2m->p2m_dirty_cpumask);

     if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
         ept_p2m_init(p2m);
@@ -102,6 +101,8 @@ p2m_init_nestedp2m(struct domain *d)
         d->arch.nested_p2m[i] = p2m = xzalloc(struct p2m_domain);
         if (p2m == NULL)
             return -ENOMEM;
+        if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
+            return -ENOMEM;
         p2m_initialise(d, p2m);
         p2m->write_p2m_entry = nestedp2m_write_p2m_entry;
         list_add(&p2m->np2m_list, &p2m_get_hostp2m(d)->np2m_list);
@@ -118,6 +119,11 @@ int p2m_init(struct domain *d)
     p2m_get_hostp2m(d) = p2m = xzalloc(struct p2m_domain);
     if ( p2m == NULL )
         return -ENOMEM;
+    if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
+    {
+        xfree(p2m);
+        return -ENOMEM;
+    }
     p2m_initialise(d, p2m);

     /* Must initialise nestedp2m unconditionally
@@ -333,6 +339,9 @@ static void p2m_teardown_nestedp2m(struc
     uint8_t i;

     for (i = 0; i < MAX_NESTEDP2M; i++) {
+        if ( !d->arch.nested_p2m[i] )
+            continue;
+        free_cpumask_var(d->arch.nested_p2m[i]->dirty_cpumask);
         xfree(d->arch.nested_p2m[i]);
         d->arch.nested_p2m[i] = NULL;
     }
@@ -341,8 +350,12 @@ static void p2m_teardown_nestedp2m(struc
 void p2m_final_teardown(struct domain *d)
 {
     /* Iterate over all p2m tables per domain */
-    xfree(d->arch.p2m);
-    d->arch.p2m = NULL;
+    if ( d->arch.p2m )
+    {
+        free_cpumask_var(d->arch.p2m->dirty_cpumask);
+        xfree(d->arch.p2m);
+        d->arch.p2m = NULL;
+    }

     /* We must teardown unconditionally because
      * we initialise them unconditionally.
@@ -1200,7 +1213,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64
             if (p2m->cr3 == CR3_EADDR)
                 hvm_asid_flush_vcpu(v);
             p2m->cr3 = cr3;
-            cpu_set(v->processor, p2m->p2m_dirty_cpumask);
+            cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
             p2m_unlock(p2m);
             nestedp2m_unlock(d);
             return p2m;
@@ -1217,7 +1230,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64
     p2m->cr3 = cr3;
     nv->nv_flushp2m = 0;
     hvm_asid_flush_vcpu(v);
-    cpu_set(v->processor, p2m->p2m_dirty_cpumask);
+    cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
     p2m_unlock(p2m);
     nestedp2m_unlock(d);

--- 2011-10-18.orig/xen/include/asm-x86/p2m.h   2011-10-21 09:24:51.000000000 +0200
+++ 2011-10-18/xen/include/asm-x86/p2m.h        2011-10-18 16:39:34.000000000 +0200
@@ -198,7 +198,7 @@ struct p2m_domain {
      * this p2m and those physical cpus whose vcpu's are in
      * guestmode.
      */
-    cpumask_t          p2m_dirty_cpumask;
+    cpumask_var_t      dirty_cpumask;

     struct domain     *domain;   /* back pointer to domain */

diff -r 253073b522f8 -r 2682094bc243 xen/arch/x86/hvm/nestedhvm.c
--- a/xen/arch/x86/hvm/nestedhvm.c      Fri Oct 21 09:23:05 2011 +0200
+++ b/xen/arch/x86/hvm/nestedhvm.c      Fri Oct 21 09:42:47 2011 +0200
@@ -114,9 +114,9 @@
 void
 nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m)
 {
-    on_selected_cpus(&p2m->p2m_dirty_cpumask, nestedhvm_flushtlb_ipi,
+    on_selected_cpus(p2m->dirty_cpumask, nestedhvm_flushtlb_ipi,
         p2m->domain, 1);
-    cpumask_clear(&p2m->p2m_dirty_cpumask);
+    cpumask_clear(p2m->dirty_cpumask);
 }
 
 bool_t
diff -r 253073b522f8 -r 2682094bc243 xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c  Fri Oct 21 09:23:05 2011 +0200
+++ b/xen/arch/x86/mm/hap/nested_hap.c  Fri Oct 21 09:42:47 2011 +0200
@@ -88,7 +88,7 @@
     safe_write_pte(p, new);
 
     if (old_flags & _PAGE_PRESENT)
-        flush_tlb_mask(&p2m->p2m_dirty_cpumask);
+        flush_tlb_mask(p2m->dirty_cpumask);
     
     paging_unlock(d);
 }
diff -r 253073b522f8 -r 2682094bc243 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Fri Oct 21 09:23:05 2011 +0200
+++ b/xen/arch/x86/mm/p2m.c     Fri Oct 21 09:42:47 2011 +0200
@@ -81,7 +81,6 @@
     p2m->default_access = p2m_access_rwx;
 
     p2m->cr3 = CR3_EADDR;
-    cpumask_clear(&p2m->p2m_dirty_cpumask);
 
     if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
         ept_p2m_init(p2m);
@@ -102,6 +101,8 @@
         d->arch.nested_p2m[i] = p2m = xzalloc(struct p2m_domain);
         if (p2m == NULL)
             return -ENOMEM;
+        if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
+            return -ENOMEM;
         p2m_initialise(d, p2m);
         p2m->write_p2m_entry = nestedp2m_write_p2m_entry;
         list_add(&p2m->np2m_list, &p2m_get_hostp2m(d)->np2m_list);
@@ -118,6 +119,11 @@
     p2m_get_hostp2m(d) = p2m = xzalloc(struct p2m_domain);
     if ( p2m == NULL )
         return -ENOMEM;
+    if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
+    {
+        xfree(p2m);
+        return -ENOMEM;
+    }
     p2m_initialise(d, p2m);
 
     /* Must initialise nestedp2m unconditionally
@@ -333,6 +339,9 @@
     uint8_t i;
 
     for (i = 0; i < MAX_NESTEDP2M; i++) {
+        if ( !d->arch.nested_p2m[i] )
+            continue;
+        free_cpumask_var(d->arch.nested_p2m[i]->dirty_cpumask);
         xfree(d->arch.nested_p2m[i]);
         d->arch.nested_p2m[i] = NULL;
     }
@@ -341,8 +350,12 @@
 void p2m_final_teardown(struct domain *d)
 {
     /* Iterate over all p2m tables per domain */
-    xfree(d->arch.p2m);
-    d->arch.p2m = NULL;
+    if ( d->arch.p2m )
+    {
+        free_cpumask_var(d->arch.p2m->dirty_cpumask);
+        xfree(d->arch.p2m);
+        d->arch.p2m = NULL;
+    }
 
     /* We must teardown unconditionally because
      * we initialise them unconditionally.
@@ -1305,7 +1318,7 @@
             if (p2m->cr3 == CR3_EADDR)
                 hvm_asid_flush_vcpu(v);
             p2m->cr3 = cr3;
-            cpu_set(v->processor, p2m->p2m_dirty_cpumask);
+            cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
             p2m_unlock(p2m);
             nestedp2m_unlock(d);
             return p2m;
@@ -1322,7 +1335,7 @@
     p2m->cr3 = cr3;
     nv->nv_flushp2m = 0;
     hvm_asid_flush_vcpu(v);
-    cpu_set(v->processor, p2m->p2m_dirty_cpumask);
+    cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
     p2m_unlock(p2m);
     nestedp2m_unlock(d);
 
diff -r 253073b522f8 -r 2682094bc243 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Fri Oct 21 09:23:05 2011 +0200
+++ b/xen/include/asm-x86/p2m.h Fri Oct 21 09:42:47 2011 +0200
@@ -198,7 +198,7 @@
      * this p2m and those physical cpus whose vcpu's are in
      * guestmode.
      */
-    cpumask_t          p2m_dirty_cpumask;
+    cpumask_var_t      dirty_cpumask;
 
     struct domain     *domain;   /* back pointer to domain */
 



 

