
[Xen-changelog] [xen-unstable] x86: Fix BUG() crash in mm.c.



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1172574036 0
# Node ID 8bd56d9cc6c759abd2d4c9e1e7724def71d70eb4
# Parent  1c5e6239a8d0381fdbf56d4926f986d7f0ec07c0
x86: Fix BUG() crash in mm.c.

queue_deferred_ops() is now valid only for the current domain: it
ASSERTs this rather than BUG()ing on foreign domains, free_page_type()
flushes a foreign domain's dirty CPUs directly with flush_tlb_mask()
instead of queueing a deferred flush, and do_mmuext_op() and
do_mmu_update() validate their guest handles and foreign domain before
taking the domain big lock, running process_deferred_ops() before that
lock is released.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm.c |  117 ++++++++++++++++++++++++++----------------------------
 1 file changed, 58 insertions(+), 59 deletions(-)

diff -r 1c5e6239a8d0 -r 8bd56d9cc6c7 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Sun Feb 25 23:58:33 2007 -0600
+++ b/xen/arch/x86/mm.c Tue Feb 27 11:00:36 2007 +0000
@@ -157,11 +157,8 @@ l2_pgentry_t *compat_idle_pg_table_l2 = 
 
 static void queue_deferred_ops(struct domain *d, unsigned int ops)
 {
-    if ( d == current->domain )
-        this_cpu(percpu_mm_info).deferred_ops |= ops;
-    else
-        BUG_ON(!test_bit(_DOMF_paused, &d->domain_flags) ||
-               !cpus_empty(d->domain_dirty_cpumask));
+    ASSERT(d == current->domain);
+    this_cpu(percpu_mm_info).deferred_ops |= ops;
 }
 
 void __init init_frametable(void)
@@ -1576,7 +1573,10 @@ void free_page_type(struct page_info *pa
          * (e.g., update_va_mapping()) or we could end up modifying a page
          * that is no longer a page table (and hence screw up ref counts).
          */
-        queue_deferred_ops(owner, DOP_FLUSH_ALL_TLBS);
+        if ( current->domain == owner )
+            queue_deferred_ops(owner, DOP_FLUSH_ALL_TLBS);
+        else
+            flush_tlb_mask(owner->domain_dirty_cpumask);
 
         if ( unlikely(paging_mode_enabled(owner)) )
         {
@@ -1950,8 +1950,6 @@ int do_mmuext_op(
     struct vcpu *v = current;
     struct domain *d = v->domain;
 
-    LOCK_BIGLOCK(d);
-
     if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
     {
         count &= ~MMU_UPDATE_PREEMPTED;
@@ -1959,17 +1957,19 @@ int do_mmuext_op(
             (void)copy_from_guest(&done, pdone, 1);
     }
 
+    if ( unlikely(!guest_handle_okay(uops, count)) )
+    {
+        rc = -EFAULT;
+        goto out;
+    }
+
     if ( !set_foreigndom(foreigndom) )
     {
         rc = -ESRCH;
         goto out;
     }
 
-    if ( unlikely(!guest_handle_okay(uops, count)) )
-    {
-        rc = -EFAULT;
-        goto out;
-    }
+    LOCK_BIGLOCK(d);
 
     for ( i = 0; i < count; i++ )
     {
@@ -2072,38 +2072,36 @@ int do_mmuext_op(
             break;
         
 #ifdef __x86_64__
-        case MMUEXT_NEW_USER_BASEPTR:
-            if ( IS_COMPAT(FOREIGNDOM) )
-            {
-                okay = 0;
-                break;
-            }
-            if (likely(mfn != 0))
+        case MMUEXT_NEW_USER_BASEPTR: {
+            unsigned long old_mfn;
+
+            if ( mfn != 0 )
             {
                 if ( paging_mode_refcounts(d) )
                     okay = get_page_from_pagenr(mfn, d);
                 else
                     okay = get_page_and_type_from_pagenr(
                         mfn, PGT_root_page_table, d);
-            }
-            if ( unlikely(!okay) )
-            {
-                MEM_LOG("Error while installing new mfn %lx", mfn);
-            }
-            else
-            {
-                unsigned long old_mfn =
-                    pagetable_get_pfn(v->arch.guest_table_user);
-                v->arch.guest_table_user = pagetable_from_pfn(mfn);
-                if ( old_mfn != 0 )
+                if ( unlikely(!okay) )
                 {
-                    if ( paging_mode_refcounts(d) )
-                        put_page(mfn_to_page(old_mfn));
-                    else
-                        put_page_and_type(mfn_to_page(old_mfn));
+                    MEM_LOG("Error while installing new mfn %lx", mfn);
+                    break;
                 }
             }
+
+            old_mfn = pagetable_get_pfn(v->arch.guest_table_user);
+            v->arch.guest_table_user = pagetable_from_pfn(mfn);
+
+            if ( old_mfn != 0 )
+            {
+                if ( paging_mode_refcounts(d) )
+                    put_page(mfn_to_page(old_mfn));
+                else
+                    put_page_and_type(mfn_to_page(old_mfn));
+            }
+
             break;
+        }
 #endif
         
         case MMUEXT_TLB_FLUSH_LOCAL:
@@ -2202,9 +2200,11 @@ int do_mmuext_op(
         guest_handle_add_offset(uops, 1);
     }
 
+    process_deferred_ops();
+
+    UNLOCK_BIGLOCK(d);
+
  out:
-    process_deferred_ops();
-
     /* Add incremental work we have done to the @done output parameter. */
     if ( unlikely(!guest_handle_is_null(pdone)) )
     {
@@ -2212,7 +2212,6 @@ int do_mmuext_op(
         copy_to_guest(pdone, &done, 1);
     }
 
-    UNLOCK_BIGLOCK(d);
     return rc;
 }
 
@@ -2233,8 +2232,6 @@ int do_mmu_update(
     unsigned long type_info;
     struct domain_mmap_cache mapcache, sh_mapcache;
 
-    LOCK_BIGLOCK(d);
-
     if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
     {
         count &= ~MMU_UPDATE_PREEMPTED;
@@ -2242,23 +2239,25 @@ int do_mmu_update(
             (void)copy_from_guest(&done, pdone, 1);
     }
 
+    if ( unlikely(!guest_handle_okay(ureqs, count)) )
+    {
+        rc = -EFAULT;
+        goto out;
+    }
+
+    if ( !set_foreigndom(foreigndom) )
+    {
+        rc = -ESRCH;
+        goto out;
+    }
+
     domain_mmap_cache_init(&mapcache);
     domain_mmap_cache_init(&sh_mapcache);
 
-    if ( !set_foreigndom(foreigndom) )
-    {
-        rc = -ESRCH;
-        goto out;
-    }
-
     perfc_incrc(calls_to_mmu_update);
     perfc_addc(num_page_updates, count);
 
-    if ( unlikely(!guest_handle_okay(ureqs, count)) )
-    {
-        rc = -EFAULT;
-        goto out;
-    }
+    LOCK_BIGLOCK(d);
 
     for ( i = 0; i < count; i++ )
     {
@@ -2342,12 +2341,11 @@ int do_mmu_update(
 #endif
 #if CONFIG_PAGING_LEVELS >= 4
                 case PGT_l4_page_table:
-                    if ( !IS_COMPAT(FOREIGNDOM) )
-                    {
-                        l4_pgentry_t l4e = l4e_from_intpte(req.val);
-                        okay = mod_l4_entry(d, va, l4e, mfn);
-                    }
-                    break;
+                {
+                    l4_pgentry_t l4e = l4e_from_intpte(req.val);
+                    okay = mod_l4_entry(d, va, l4e, mfn);
+                }
+                break;
 #endif
                 }
 
@@ -2414,12 +2412,14 @@ int do_mmu_update(
         guest_handle_add_offset(ureqs, 1);
     }
 
- out:
     domain_mmap_cache_destroy(&mapcache);
     domain_mmap_cache_destroy(&sh_mapcache);
 
     process_deferred_ops();
 
+    UNLOCK_BIGLOCK(d);
+
+ out:
     /* Add incremental work we have done to the @done output parameter. */
     if ( unlikely(!guest_handle_is_null(pdone)) )
     {
@@ -2427,7 +2427,6 @@ int do_mmu_update(
         copy_to_guest(pdone, &done, 1);
     }
 
-    UNLOCK_BIGLOCK(d);
     return rc;
 }
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog