[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] Make map_domain_page_global fail



# HG changeset patch
# User Jeremy Fitzhardinge <jeremy@xxxxxxxxxxxxx>
# Date 1179999903 -3600
# Node ID 471478a1b89e2681c3b1efa3abde6ec47eb36d05
# Parent  96915ca8d5f239062f889970279c5d90296a8a96
Make map_domain_page_global fail

When the global mapping cache runs out, make map_domain_page_global
return NULL on failure rather than firing an assertion failure.  This
also updates the callers to handle the error gracefully.

The only exception to this is the shadow pagetable code, which uses
map_domain_page_global to create a mapping for
v->arch.paging.shadow.guest_vtable; it's not clear this needs to be a
global mapping anyway.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vlapic.c         |   15 +++++++++++----
 xen/arch/x86/mm/shadow/multi.c    |    5 +++++
 xen/arch/x86/x86_32/domain_page.c |   16 ++++++++++++----
 3 files changed, 28 insertions(+), 8 deletions(-)

diff -r 96915ca8d5f2 -r 471478a1b89e xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Thu May 24 10:18:55 2007 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Thu May 24 10:45:03 2007 +0100
@@ -918,12 +918,19 @@ int vlapic_init(struct vcpu *v)
     vlapic->regs_page = alloc_domheap_page(NULL);
     if ( vlapic->regs_page == NULL )
     {
+        dprintk(XENLOG_ERR, "malloc vlapic regs_page error for vcpu %x\n",
+                v->vcpu_id);
+        return -ENOMEM;
+    }
+
+    vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
+    if ( vlapic->regs == NULL )
+    {
        dprintk(XENLOG_ERR, "map vlapic regs error for vcpu %x\n",
                v->vcpu_id);
-        return -ENOMEM;
-    }
-
-    vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
+        return -ENOMEM;
+    }
+
     memset(vlapic->regs, 0, PAGE_SIZE);
 
     vlapic_reset(vlapic);
diff -r 96915ca8d5f2 -r 471478a1b89e xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu May 24 10:18:55 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu May 24 10:45:03 2007 +0100
@@ -3485,6 +3485,8 @@ sh_update_cr3(struct vcpu *v, int do_loc
         if ( v->arch.paging.shadow.guest_vtable )
             sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
         v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+        /* PAGING_LEVELS==4 implies 64-bit, which means that
+         * map_domain_page_global can't fail */
     }
     else
         v->arch.paging.shadow.guest_vtable = __linear_l4_table;
@@ -3515,6 +3517,9 @@ sh_update_cr3(struct vcpu *v, int do_loc
         if ( v->arch.paging.shadow.guest_vtable )
             sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
         v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+        /* Does this really need map_domain_page_global?  Handle the
+         * error properly if so. */
+        ASSERT( v->arch.paging.shadow.guest_vtable );
     }
     else
         v->arch.paging.shadow.guest_vtable = __linear_l2_table;
diff -r 96915ca8d5f2 -r 471478a1b89e xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Thu May 24 10:18:55 2007 +0100
+++ b/xen/arch/x86/x86_32/domain_page.c Thu May 24 10:45:03 2007 +0100
@@ -218,17 +218,25 @@ void *map_domain_page_global(unsigned lo
 
         idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
         va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
-        ASSERT(va < FIXADDR_START);
+        if ( va >= FIXADDR_START )
+        {
+            va = 0;
+            goto fail;
+        }
     }
 
     set_bit(idx, inuse);
     inuse_cursor = idx + 1;
 
+  fail:
     spin_unlock(&globalmap_lock);
 
-    pl2e = virt_to_xen_l2e(va);
-    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
-    l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+    if ( likely(va != 0) )
+    {
+        pl2e = virt_to_xen_l2e(va);
+        pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
+        l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+    }
 
     return (void *)va;
 }

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.