
[Xen-changelog] [xen-unstable] x86 shadow: Move the shadow linear mapping for n-on-3-on-4 shadows so



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1202725013 0
# Node ID 9541494c0945305071bf258ad23fbdaff423c2a6
# Parent  49f87f3c2cb8633a516987b7da44945568245e75
x86 shadow: Move the shadow linear mapping for n-on-3-on-4 shadows so
that guest mappings of the bottom 4GB are not reflected in the monitor
pagetable. This ensures in particular that page 0 is not mapped,
allowing us to catch NULL dereferences in the hypervisor.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
 xen/arch/x86/mm/shadow/multi.c |  108 +++++++++++++++++++++++++++--------------
 1 files changed, 72 insertions(+), 36 deletions(-)
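
The change below hinges on some simple L4 slot arithmetic.  In a 4-level
monitor pagetable each L4 slot covers 512GB of virtual address space, so
every address a guest running on 3-level shadows can generate, page 0
included, falls under L4 slot 0.  Hooking the shadow linear structures
into slot 0, as the old code did, therefore reflected the guest's
bottom-4GB mappings into the monitor pagetable; hanging them off the
SH_LINEAR_PT_VIRT_START slot instead leaves page 0 unmapped.  The
standalone sketch below illustrates only the slot arithmetic: the macro
is a simplified restatement of the usual x86-64 offset calculation, not
the Xen definition, and the program is illustrative only.

/* Illustrative sketch only: which L4 slot a virtual address falls in. */
#include <stdint.h>
#include <stdio.h>

/* Each L4 slot maps 2^39 bytes (512GB); simplified, not Xen's macro. */
#define l4_slot(va) (((uint64_t)(va) >> 39) & 511)

int main(void)
{
    /* The NULL page and the top of the 32-bit range share L4 slot 0. */
    printf("L4 slot of VA 0x00000000 : %llu\n",
           (unsigned long long)l4_slot(0x0ULL));          /* prints 0 */
    printf("L4 slot of VA 0xffffffff : %llu\n",
           (unsigned long long)l4_slot(0xffffffffULL));   /* prints 0 */
    return 0;
}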

diff -r 49f87f3c2cb8 -r 9541494c0945 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Mon Feb 11 10:15:07 2008 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Mon Feb 11 10:16:53 2008 +0000
@@ -1470,10 +1470,14 @@ void sh_install_xen_entries_in_l4(struct
         shadow_l4e_from_mfn(page_to_mfn(virt_to_page(d->arch.mm_perdomain_l3)),
                             __PAGE_HYPERVISOR);
 
-    /* Linear mapping */
+    /* Shadow linear mapping for 4-level shadows.  N.B. for 3-level
+     * shadows on 64-bit xen, this linear mapping is later replaced by the
+     * monitor pagetable structure, which is built in make_monitor_table
+     * and maintained by sh_update_linear_entries. */
     sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
         shadow_l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR);
 
+    /* Self linear mapping.  */
     if ( shadow_mode_translate(v->domain) && !shadow_mode_external(v->domain) )
     {
         // linear tables may not be used with translated PV guests
@@ -1745,7 +1749,7 @@ sh_make_monitor_table(struct vcpu *v)
     ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
     
     /* Guarantee we can get the memory we need */
-    shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS - 1);
+    shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
 
 #if CONFIG_PAGING_LEVELS == 4    
     {
@@ -1755,22 +1759,34 @@ sh_make_monitor_table(struct vcpu *v)
         /* Remember the level of this table */
         mfn_to_page(m4mfn)->shadow_flags = 4;
 #if SHADOW_PAGING_LEVELS < 4
-        // Install a monitor l3 table in slot 0 of the l4 table.
-        // This is used for shadow linear maps.
-        {
-            mfn_t m3mfn; 
+        {
+            mfn_t m3mfn, m2mfn;
             l4_pgentry_t *l4e;
+            l3_pgentry_t *l3e;
+            /* Install an l3 table and an l2 table that will hold the shadow 
+             * linear map entries.  This overrides the linear map entry that 
+             * was installed by sh_install_xen_entries_in_l4. */
+            l4e = sh_map_domain_page(m4mfn);
+
             m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
             mfn_to_page(m3mfn)->shadow_flags = 3;
-            l4e = sh_map_domain_page(m4mfn);
-            l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
-            sh_unmap_domain_page(l4e);
+            l4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)]
+                = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
+
+            m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
+            mfn_to_page(m2mfn)->shadow_flags = 2;
+            l3e = sh_map_domain_page(m3mfn);
+            l3e[0] = l3e_from_pfn(mfn_x(m2mfn), __PAGE_HYPERVISOR);
+            sh_unmap_domain_page(l3e);
+
             if ( is_pv_32on64_vcpu(v) )
             {
-                // Install a monitor l2 table in slot 3 of the l3 table.
-                // This is used for all Xen entries.
-                mfn_t m2mfn;
-                l3_pgentry_t *l3e;
+                /* For 32-on-64 PV guests, we need to map the 32-bit Xen
+                 * area into its usual VAs in the monitor tables */
+                m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
+                mfn_to_page(m3mfn)->shadow_flags = 3;
+                l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
+                
                 m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
                 mfn_to_page(m2mfn)->shadow_flags = 2;
                 l3e = sh_map_domain_page(m3mfn);
@@ -1778,6 +1794,8 @@ sh_make_monitor_table(struct vcpu *v)
                 sh_install_xen_entries_in_l2h(v, m2mfn);
                 sh_unmap_domain_page(l3e);
             }
+
+            sh_unmap_domain_page(l4e);
         }
 #endif /* SHADOW_PAGING_LEVELS < 4 */
         return m4mfn;
@@ -2181,21 +2199,34 @@ void sh_destroy_monitor_table(struct vcp
     ASSERT(mfn_to_shadow_page(mmfn)->type == SH_type_monitor_table);
 
 #if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS != 4)
-    /* Need to destroy the l3 monitor page in slot 0 too */
     {
         mfn_t m3mfn;
         l4_pgentry_t *l4e = sh_map_domain_page(mmfn);
-        ASSERT(l4e_get_flags(l4e[0]) & _PAGE_PRESENT);
-        m3mfn = _mfn(l4e_get_pfn(l4e[0]));
+        l3_pgentry_t *l3e;
+        int linear_slot = shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START);
+ 
+        /* Need to destroy the l3 and l2 monitor pages used 
+         * for the linear map */
+        ASSERT(l4e_get_flags(l4e[linear_slot]) & _PAGE_PRESENT);
+        m3mfn = _mfn(l4e_get_pfn(l4e[linear_slot]));
+        l3e = sh_map_domain_page(m3mfn);
+        ASSERT(l3e_get_flags(l3e[0]) & _PAGE_PRESENT);
+        shadow_free(d, _mfn(l3e_get_pfn(l3e[0])));
+        sh_unmap_domain_page(l3e);
+        shadow_free(d, m3mfn);
+
         if ( is_pv_32on64_vcpu(v) )
         {
-            /* Need to destroy the l2 monitor page in slot 3 too */
-            l3_pgentry_t *l3e = sh_map_domain_page(m3mfn);
+            /* Need to destroy the l3 and l2 monitor pages that map the
+             * Xen VAs at 3GB-4GB */
+            ASSERT(l4e_get_flags(l4e[0]) & _PAGE_PRESENT);
+            m3mfn = _mfn(l4e_get_pfn(l4e[0]));
+            l3e = sh_map_domain_page(m3mfn);
             ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
             shadow_free(d, _mfn(l3e_get_pfn(l3e[3])));
             sh_unmap_domain_page(l3e);
-        }
-        shadow_free(d, m3mfn);
+            shadow_free(d, m3mfn);
+        }
         sh_unmap_domain_page(l4e);
     }
 #elif CONFIG_PAGING_LEVELS == 3
@@ -3222,28 +3253,33 @@ sh_update_linear_entries(struct vcpu *v)
 
     if ( shadow_mode_external(d) )
     {
-        /* Install copies of the shadow l3es into the monitor l3 table.
-         * The monitor l3 table is hooked into slot 0 of the monitor
-         * l4 table, so we use l3 linear indices 0 to 3 */
+        /* Install copies of the shadow l3es into the monitor l2 table
+         * that maps SH_LINEAR_PT_VIRT_START. */
         shadow_l3e_t *sl3e;
-        l3_pgentry_t *ml3e;
-        mfn_t l3mfn;
+        l2_pgentry_t *ml2e;
         int i;
 
         /* Use linear mappings if we can; otherwise make new mappings */
-        if ( v == current ) 
-        {
-            ml3e = __linear_l3_table;
-            l3mfn = _mfn(l4e_get_pfn(__linear_l4_table[0]));
-        }
+        if ( v == current )
+            ml2e = __linear_l2_table
+                + l2_linear_offset(SH_LINEAR_PT_VIRT_START);
         else 
         {   
+            mfn_t l3mfn, l2mfn;
             l4_pgentry_t *ml4e;
+            l3_pgentry_t *ml3e;
+            int linear_slot = shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START);
             ml4e = sh_map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
-            ASSERT(l4e_get_flags(ml4e[0]) & _PAGE_PRESENT);
-            l3mfn = _mfn(l4e_get_pfn(ml4e[0]));
+
+            ASSERT(l4e_get_flags(ml4e[linear_slot]) & _PAGE_PRESENT);
+            l3mfn = _mfn(l4e_get_pfn(ml4e[linear_slot]));
             ml3e = sh_map_domain_page(l3mfn);
             sh_unmap_domain_page(ml4e);
+
+            ASSERT(l3e_get_flags(ml3e[0]) & _PAGE_PRESENT);
+            l2mfn = _mfn(l3e_get_pfn(ml3e[0]));
+            ml2e = sh_map_domain_page(l2mfn);
+            sh_unmap_domain_page(ml3e);
         }
 
         /* Shadow l3 tables are made up by sh_update_cr3 */
@@ -3251,15 +3287,15 @@ sh_update_linear_entries(struct vcpu *v)
 
         for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
         {
-            ml3e[i] = 
+            ml2e[i] = 
                 (shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT) 
-                ? l3e_from_pfn(mfn_x(shadow_l3e_get_mfn(sl3e[i])), 
+                ? l2e_from_pfn(mfn_x(shadow_l3e_get_mfn(sl3e[i])),
                                __PAGE_HYPERVISOR) 
-                : l3e_empty();
+                : l2e_empty();
         }
 
         if ( v != current ) 
-            sh_unmap_domain_page(ml3e);
+            sh_unmap_domain_page(ml2e);
     }
     else
         domain_crash(d); /* XXX */
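
A related bit of arithmetic explains the constants in the
is_pv_32on64_vcpu() branches above: the compat Xen mappings for a
32-on-64 PV guest sit at guest VAs between 3GB and 4GB (as the comment
in the destroy path says), and that range falls in L4 slot 0 and L3
slot 3, hence the l4e[0] and l3e[3] references in those paths.  The
sketch below is illustrative only; the macros restate the usual x86-64
offset arithmetic and are not the Xen definitions.

/* Illustrative sketch only: slots occupied by the 3GB-4GB compat area. */
#include <stdint.h>
#include <stdio.h>

/* Simplified offset macros: an L4 slot maps 512GB, an L3 slot maps 1GB. */
#define l4_slot(va) (((uint64_t)(va) >> 39) & 511)
#define l3_slot(va) (((uint64_t)(va) >> 30) & 511)

int main(void)
{
    uint64_t compat_xen_base = 3ULL << 30;   /* 3GB, bottom of the range */

    printf("L4 slot for 3GB-4GB : %llu\n",
           (unsigned long long)l4_slot(compat_xen_base));  /* prints 0 */
    printf("L3 slot for 3GB-4GB : %llu\n",
           (unsigned long long)l3_slot(compat_xen_base));  /* prints 3 */
    return 0;
}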
