
[Xen-changelog] [xen-unstable] [XEN] 32on64 shadowing / live migration support for PV PAE compat guests

# HG changeset patch
# User Emmanuel Ackaouy <ack@xxxxxxxxxxxxx>
# Date 1171915087 0
# Node ID 9c2e6f8f3aa7a4e2a1f3af3204789568edf975cd
# Parent  0b882c911b885a51308eee3ec80bc4a5a230d7ce
[XEN] 32on64 shadowing / live migration support for PV PAE compat guests
PAE compat guests on 64-bit hypervisors are shadowed
using 4-on-4 paging, with special handling for the top-level
L4 page and the L2 entries containing the M2P mappings.

Signed-off-by: Emmanuel Ackaouy <ack@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm.c                 |   24 +++--
 xen/arch/x86/mm/shadow/common.c   |   52 ++++++++---
 xen/arch/x86/mm/shadow/multi.c    |  172 ++++++++++++++++++++++----------------
 xen/arch/x86/mm/shadow/private.h  |   17 +--
 xen/arch/x86/mm/shadow/types.h    |    1 
 xen/include/asm-x86/x86_64/page.h |   10 +-
 6 files changed, 172 insertions(+), 104 deletions(-)
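
The central interface change below is that is_guest_l4_slot() now takes
the owning domain: a compat (32-on-64) guest controls only slot 0 of its
L4 table, while a native 64-bit PV guest controls everything outside the
hypervisor's reserved slot range. A minimal standalone sketch of that
predicate (the slot constants are placeholders standing in for Xen's
real ROOT_PAGETABLE_*_XEN_SLOT definitions; the real code is a macro,
shown in the xen/include/asm-x86/x86_64/page.h hunk at the end):

    #include <stdbool.h>

    #define ROOT_PAGETABLE_FIRST_XEN_SLOT 256   /* placeholder values */
    #define ROOT_PAGETABLE_LAST_XEN_SLOT  271

    struct domain { bool is_compat; };

    /* Sketch: may the guest write this L4 slot? (cf. is_guest_l4_slot) */
    static bool guest_owns_l4_slot(const struct domain *d, unsigned int slot)
    {
        if ( d->is_compat )
            return slot == 0;   /* compat guests control only L4 slot 0 */
        return slot < ROOT_PAGETABLE_FIRST_XEN_SLOT ||
               slot > ROOT_PAGETABLE_LAST_XEN_SLOT;
    }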

diff -r 0b882c911b88 -r 9c2e6f8f3aa7 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Feb 19 16:16:53 2007 +0000
+++ b/xen/arch/x86/mm.c Mon Feb 19 19:58:07 2007 +0000
@@ -1147,7 +1147,7 @@ static int alloc_l4_table(struct page_in
 
     for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
     {
-        if ( is_guest_l4_slot(i) &&
+        if ( is_guest_l4_slot(d, i) &&
              unlikely(!get_page_from_l4e(pl4e[i], pfn, d)) )
             goto fail;
 
@@ -1173,7 +1173,7 @@ static int alloc_l4_table(struct page_in
  fail:
     MEM_LOG("Failure in alloc_l4_table: entry %d", i);
     while ( i-- > 0 )
-        if ( is_guest_l4_slot(i) )
+        if ( is_guest_l4_slot(d, i) )
             put_page_from_l4e(pl4e[i], pfn);
 
     return 0;
@@ -1248,12 +1248,13 @@ static void free_l3_table(struct page_in
 
 static void free_l4_table(struct page_info *page)
 {
+    struct domain *d = page_get_owner(page);
     unsigned long pfn = page_to_mfn(page);
     l4_pgentry_t *pl4e = page_to_virt(page);
     int           i;
 
     for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
-        if ( is_guest_l4_slot(i) )
+        if ( is_guest_l4_slot(d, i) )
             put_page_from_l4e(pl4e[i], pfn);
 }
 
@@ -1480,13 +1481,14 @@ static int mod_l3_entry(l3_pgentry_t *pl
 #if CONFIG_PAGING_LEVELS >= 4
 
 /* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */
-static int mod_l4_entry(l4_pgentry_t *pl4e, 
+static int mod_l4_entry(struct domain *d,
+                        l4_pgentry_t *pl4e, 
                         l4_pgentry_t nl4e, 
                         unsigned long pfn)
 {
     l4_pgentry_t ol4e;
 
-    if ( unlikely(!is_guest_l4_slot(pgentry_ptr_to_slot(pl4e))) )
+    if ( unlikely(!is_guest_l4_slot(d, pgentry_ptr_to_slot(pl4e))) )
     {
         MEM_LOG("Illegal L4 update attempt in Xen-private area %p", pl4e);
         return 0;
@@ -1777,9 +1779,13 @@ int new_guest_cr3(unsigned long mfn)
     {
         okay = paging_mode_refcounts(d)
             ? 0 /* Old code was broken, but what should it be? */
-            : mod_l4_entry(__va(pagetable_get_paddr(v->arch.guest_table)),
-                           l4e_from_pfn(mfn, (_PAGE_PRESENT|_PAGE_RW|
-                                              _PAGE_USER|_PAGE_ACCESSED)), 0);
+            : mod_l4_entry(
+                    d,
+                    __va(pagetable_get_paddr(v->arch.guest_table)),
+                    l4e_from_pfn(
+                        mfn,
+                        (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)),
+                    pagetable_get_pfn(v->arch.guest_table));
         if ( unlikely(!okay) )
         {
             MEM_LOG("Error while installing new compat baseptr %lx", mfn);
@@ -2339,7 +2345,7 @@ int do_mmu_update(
                     if ( !IS_COMPAT(FOREIGNDOM) )
                     {
                         l4_pgentry_t l4e = l4e_from_intpte(req.val);
-                        okay = mod_l4_entry(va, l4e, mfn);
+                        okay = mod_l4_entry(d, va, l4e, mfn);
                     }
                     break;
 #endif
diff -r 0b882c911b88 -r 9c2e6f8f3aa7 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Mon Feb 19 16:16:53 2007 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Mon Feb 19 19:58:07 2007 +0000
@@ -485,7 +485,11 @@ void shadow_demote(struct vcpu *v, mfn_t
 {
     struct page_info *page = mfn_to_page(gmfn);
 
-    ASSERT(test_bit(_PGC_page_table, &page->count_info));
+#ifdef CONFIG_COMPAT
+    if ( !IS_COMPAT(v->domain) || type != SH_type_l4_64_shadow )
+#endif
+        ASSERT(test_bit(_PGC_page_table, &page->count_info));
+
     ASSERT(test_bit(type, &page->shadow_flags));
 
     clear_bit(type, &page->shadow_flags);
@@ -567,6 +571,9 @@ sh_validate_guest_entry(struct vcpu *v, 
     if ( page->shadow_flags & SHF_L2_64 ) 
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 4, 4)
             (v, gmfn, entry, size);
+    if ( page->shadow_flags & SHF_L2H_64 ) 
+        result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 4, 4)
+            (v, gmfn, entry, size);
     if ( page->shadow_flags & SHF_L3_64 ) 
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, 4, 4)
             (v, gmfn, entry, size);
@@ -575,7 +582,7 @@ sh_validate_guest_entry(struct vcpu *v, 
             (v, gmfn, entry, size);
 #else /* 32-bit/PAE hypervisor does not support 64-bit guests */
     ASSERT((page->shadow_flags 
-            & (SHF_L4_64|SHF_L3_64|SHF_L2_64|SHF_L1_64)) == 0);
+            & (SHF_L4_64|SHF_L3_64|SHF_L2H_64|SHF_L2_64|SHF_L1_64)) == 0);
 #endif
 
     return result;
@@ -705,7 +712,7 @@ shadow_order(unsigned int shadow_type)
 shadow_order(unsigned int shadow_type) 
 {
 #if CONFIG_PAGING_LEVELS > 2
-    static const u32 type_to_order[16] = {
+    static const u32 type_to_order[SH_type_unused] = {
         0, /* SH_type_none           */
         1, /* SH_type_l1_32_shadow   */
         1, /* SH_type_fl1_32_shadow  */
@@ -717,12 +724,13 @@ shadow_order(unsigned int shadow_type)
         0, /* SH_type_l1_64_shadow   */
         0, /* SH_type_fl1_64_shadow  */
         0, /* SH_type_l2_64_shadow   */
+        0, /* SH_type_l2h_64_shadow  */
         0, /* SH_type_l3_64_shadow   */
         0, /* SH_type_l4_64_shadow   */
         2, /* SH_type_p2m_table      */
         0  /* SH_type_monitor_table  */
         };
-    ASSERT(shadow_type < 16);
+    ASSERT(shadow_type < SH_type_unused);
     return type_to_order[shadow_type];
 #else  /* 32-bit Xen only ever shadows 32-bit guests on 32-bit shadows. */
     return 0;
@@ -1564,6 +1572,9 @@ void sh_destroy_shadow(struct vcpu *v, m
            t == SH_type_fl1_pae_shadow ||  
            t == SH_type_fl1_64_shadow  || 
            t == SH_type_monitor_table  || 
+#ifdef CONFIG_COMPAT
+           (IS_COMPAT(v->domain) && t == SH_type_l4_64_shadow) ||
+#endif
            (page_get_owner(mfn_to_page(_mfn(sp->backpointer))) 
             == v->domain)); 
 
@@ -1605,6 +1616,8 @@ void sh_destroy_shadow(struct vcpu *v, m
     case SH_type_fl1_64_shadow:
         SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4, 4)(v, smfn);
         break;
+    case SH_type_l2h_64_shadow:
+        ASSERT( IS_COMPAT(v->domain) );
     case SH_type_l2_64_shadow:
         SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4, 4)(v, smfn);
         break;
@@ -1633,7 +1646,7 @@ int sh_remove_write_access(struct vcpu *
                            unsigned long fault_addr)
 {
     /* Dispatch table for getting per-type functions */
-    static hash_callback_t callbacks[16] = {
+    static hash_callback_t callbacks[SH_type_unused] = {
         NULL, /* none    */
 #if CONFIG_PAGING_LEVELS == 2
         SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,2,2), /* l1_32   */
@@ -1660,6 +1673,7 @@ int sh_remove_write_access(struct vcpu *
         NULL, /* fl1_64  */
 #endif
         NULL, /* l2_64   */
+        NULL, /* l2h_64  */
         NULL, /* l3_64   */
         NULL, /* l4_64   */
         NULL, /* p2m     */
@@ -1822,7 +1836,7 @@ int sh_remove_all_mappings(struct vcpu *
     int expected_count, do_locking;
 
     /* Dispatch table for getting per-type functions */
-    static hash_callback_t callbacks[16] = {
+    static hash_callback_t callbacks[SH_type_unused] = {
         NULL, /* none    */
 #if CONFIG_PAGING_LEVELS == 2
         SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,2,2), /* l1_32   */
@@ -1849,6 +1863,7 @@ int sh_remove_all_mappings(struct vcpu *
         NULL, /* fl1_64  */
 #endif
         NULL, /* l2_64   */
+        NULL, /* l2h_64  */
         NULL, /* l3_64   */
         NULL, /* l4_64   */
         NULL, /* p2m     */
@@ -1956,6 +1971,7 @@ static int sh_remove_shadow_via_pointer(
 #if CONFIG_PAGING_LEVELS >= 4
     case SH_type_l1_64_shadow:
     case SH_type_l2_64_shadow:
+    case SH_type_l2h_64_shadow:
     case SH_type_l3_64_shadow:
     case SH_type_l4_64_shadow:
         SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,4,4)(v, vaddr, pmfn);
@@ -1991,7 +2007,7 @@ void sh_remove_shadows(struct vcpu *v, m
     
     /* Dispatch table for getting per-type functions: each level must
      * be called with the function to remove a lower-level shadow. */
-    static hash_callback_t callbacks[16] = {
+    static hash_callback_t callbacks[SH_type_unused] = {
         NULL, /* none    */
         NULL, /* l1_32   */
         NULL, /* fl1_32  */
@@ -2013,10 +2029,12 @@ void sh_remove_shadows(struct vcpu *v, m
         NULL, /* fl1_64  */
 #if CONFIG_PAGING_LEVELS >= 4
         SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,4,4), /* l2_64   */
+        SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,4,4), /* l2h_64  */
         SHADOW_INTERNAL_NAME(sh_remove_l2_shadow,4,4), /* l3_64   */
         SHADOW_INTERNAL_NAME(sh_remove_l3_shadow,4,4), /* l4_64   */
 #else
         NULL, /* l2_64   */
+        NULL, /* l2h_64  */
         NULL, /* l3_64   */
         NULL, /* l4_64   */
 #endif
@@ -2025,7 +2043,7 @@ void sh_remove_shadows(struct vcpu *v, m
     };
 
     /* Another lookup table, for choosing which mask to use */
-    static unsigned int masks[16] = {
+    static unsigned int masks[SH_type_unused] = {
         0, /* none    */
         1 << SH_type_l2_32_shadow, /* l1_32   */
         0, /* fl1_32  */
@@ -2035,9 +2053,11 @@ void sh_remove_shadows(struct vcpu *v, m
         0, /* fl1_pae */
         0, /* l2_pae  */
         0, /* l2h_pae  */
-        1 << SH_type_l2_64_shadow, /* l1_64   */
+        ((1 << SH_type_l2h_64_shadow)
+         | (1 << SH_type_l2_64_shadow)),  /* l1_64   */
         0, /* fl1_64  */
         1 << SH_type_l3_64_shadow, /* l2_64   */
+        1 << SH_type_l3_64_shadow, /* l2h_64  */
         1 << SH_type_l4_64_shadow, /* l3_64   */
         0, /* l4_64   */
         0, /* p2m     */
@@ -2088,6 +2108,7 @@ void sh_remove_shadows(struct vcpu *v, m
 #if CONFIG_PAGING_LEVELS >= 4
     if ( sh_flags & SHF_L1_64 )   DO_UNSHADOW(SH_type_l1_64_shadow);
     if ( sh_flags & SHF_L2_64 )   DO_UNSHADOW(SH_type_l2_64_shadow);
+    if ( sh_flags & SHF_L2H_64 )  DO_UNSHADOW(SH_type_l2h_64_shadow);
     if ( sh_flags & SHF_L3_64 )   DO_UNSHADOW(SH_type_l3_64_shadow);
     if ( sh_flags & SHF_L4_64 )   DO_UNSHADOW(SH_type_l4_64_shadow);
 #endif
@@ -2157,10 +2178,7 @@ static void sh_update_paging_modes(struc
         /// PV guest
         ///
 #if CONFIG_PAGING_LEVELS == 4
-        if ( pv_32bit_guest(v) )
-            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
-        else
-            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,4);
+        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,4);
 #elif CONFIG_PAGING_LEVELS == 3
         v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
 #elif CONFIG_PAGING_LEVELS == 2
@@ -2691,6 +2709,11 @@ static int shadow_log_dirty_enable(struc
         goto out;
     }
 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
+    if ( IS_COMPAT(d) )
+        d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
+#endif
+
     ret = sh_alloc_log_dirty_bitmap(d);
     if ( ret != 0 )
     {
@@ -3016,7 +3039,7 @@ void shadow_audit_tables(struct vcpu *v)
 void shadow_audit_tables(struct vcpu *v) 
 {
     /* Dispatch table for getting per-type functions */
-    static hash_callback_t callbacks[16] = {
+    static hash_callback_t callbacks[SH_type_unused] = {
         NULL, /* none    */
 #if CONFIG_PAGING_LEVELS == 2
         SHADOW_INTERNAL_NAME(sh_audit_l1_table,2,2),  /* l1_32   */
@@ -3034,6 +3057,7 @@ void shadow_audit_tables(struct vcpu *v)
         SHADOW_INTERNAL_NAME(sh_audit_l1_table,4,4),  /* l1_64   */
         SHADOW_INTERNAL_NAME(sh_audit_fl1_table,4,4), /* fl1_64  */
         SHADOW_INTERNAL_NAME(sh_audit_l2_table,4,4),  /* l2_64   */
+        SHADOW_INTERNAL_NAME(sh_audit_l2_table,4,4),  /* l2h_64   */
         SHADOW_INTERNAL_NAME(sh_audit_l3_table,4,4),  /* l3_64   */
         SHADOW_INTERNAL_NAME(sh_audit_l4_table,4,4),  /* l4_64   */
 #endif /* CONFIG_PAGING_LEVELS >= 4 */
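
A recurring change in common.c above is resizing the per-shadow-type
dispatch tables from a hard-coded 16 entries to SH_type_unused, so that
adding the new l2h_64 type cannot silently overflow them. A minimal
sketch of the sentinel-sizing idiom (the real SH_type_* values are
#defines in private.h, abridged here as an enum):

    typedef void (*hash_callback_t)(void);

    enum sh_type {
        SH_type_none, SH_type_l2_64, SH_type_l2h_64,  /* ...abridged... */
        SH_type_unused                    /* sentinel: one past last type */
    };

    static hash_callback_t callbacks[SH_type_unused]; /* grows with enum */

    static void dispatch(enum sh_type t)
    {
        /* mirrors ASSERT(shadow_type < SH_type_unused) in shadow_order() */
        if ( t < SH_type_unused && callbacks[t] )
            callbacks[t]();
    }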
diff -r 0b882c911b88 -r 9c2e6f8f3aa7 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Mon Feb 19 16:16:53 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Mon Feb 19 19:58:07 2007 +0000
@@ -162,8 +162,13 @@ set_shadow_status(struct vcpu *v, mfn_t 
     else
         mfn_to_shadow_page(smfn)->logdirty = 0;
 
-    res = get_page(mfn_to_page(gmfn), d);
-    ASSERT(res == 1);
+#ifdef CONFIG_COMPAT
+    if ( !IS_COMPAT(d) || shadow_type != SH_type_l4_64_shadow )
+#endif
+    {
+        res = get_page(mfn_to_page(gmfn), d);
+        ASSERT(res == 1);
+    }
 
     shadow_hash_insert(v, mfn_x(gmfn), shadow_type, smfn);
 }
@@ -185,7 +190,10 @@ delete_shadow_status(struct vcpu *v, mfn
                    v->domain->domain_id, v->vcpu_id,
                    mfn_x(gmfn), shadow_type, mfn_x(smfn));
     shadow_hash_delete(v, mfn_x(gmfn), shadow_type, smfn);
-    put_page(mfn_to_page(gmfn));
+#ifdef CONFIG_COMPAT
+    if ( !IS_COMPAT(v->domain) || shadow_type != SH_type_l4_64_shadow )
+#endif
+        put_page(mfn_to_page(gmfn));
 }
 
 /**************************************************************************/
@@ -764,7 +772,7 @@ _sh_propagate(struct vcpu *v,
     // PV guests in 64-bit mode use two different page tables for user vs
     // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
     // It is always shadowed as present...
-    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_domain(d) )
+    if ( (GUEST_PAGING_LEVELS == 4) && !IS_COMPAT(d) && !is_hvm_domain(d) )
     {
         sflags |= _PAGE_USER;
     }
@@ -1233,9 +1241,10 @@ do {                                    
 #if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
 
 /* 32-bit l2 on PAE/64: four pages, touch every second entry, and avoid Xen */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)     \
+#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)     \
 do {                                                                      \
     int _i, _j, __done = 0;                                               \
+    int _xen = !shadow_mode_external(_dom);                               \
     ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);    \
     for ( _j = 0; _j < 4 && !__done; _j++ )                               \
     {                                                                     \
@@ -1259,9 +1268,10 @@ do {                                    
 #elif GUEST_PAGING_LEVELS == 2
 
 /* 32-bit on 32-bit: avoid Xen entries */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)      \
+#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)      \
 do {                                                                       \
     int _i;                                                                \
+    int _xen = !shadow_mode_external(_dom);                                \
     shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                        \
     ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);     \
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
@@ -1281,9 +1291,10 @@ do {                                    
 #elif GUEST_PAGING_LEVELS == 3
 
 /* PAE: if it's an l2h, don't touch Xen mappings */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)      \
+#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)      \
 do {                                                                       \
     int _i;                                                                \
+    int _xen = !shadow_mode_external(_dom);                                \
     shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                        \
     ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_pae_shadow      \
            || mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_pae_shadow);\
@@ -1304,21 +1315,29 @@ do {                                    
 
 #else 
 
-/* 64-bit l2: touch all entries */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code)   \
-do {                                                                    \
-    int _i;                                                             \
-    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                     \
-    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow);  \
-    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )              \
-    {                                                                   \
-        (_sl2e) = _sp + _i;                                             \
-        if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )           \
-            {_code}                                                     \
-        if ( _done ) break;                                             \
-        increment_ptr_to_guest_entry(_gl2p);                            \
-    }                                                                   \
-    unmap_shadow_page(_sp);                                             \
+/* 64-bit l2: touch all entries except for PAE compat guests. */
+#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)       \
+do {                                                                        \
+    int _i;                                                                 \
+    int _xen = !shadow_mode_external(_dom);                                 \
+    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                         \
+    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow ||     \
+           mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_64_shadow);     \
+    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
+    {                                                                       \
+        if ( (!(_xen))                                                      \
+             || !IS_COMPAT(_dom)                                            \
+             || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_64_shadow  \
+             || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
+        {                                                                   \
+            (_sl2e) = _sp + _i;                                             \
+            if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )           \
+                {_code}                                                     \
+            if ( _done ) break;                                             \
+            increment_ptr_to_guest_entry(_gl2p);                            \
+        }                                                                   \
+    }                                                                       \
+    unmap_shadow_page(_sp);                                                 \
 } while (0)
 
 #endif /* different kinds of l2 */
@@ -1343,14 +1362,15 @@ do {                                    
 } while (0)
 
 /* 64-bit l4: avoid Xen mappings */
-#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _xen, _code)   \
+#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _dom, _code)   \
 do {                                                                    \
+    shadow_l4e_t *_sp = map_shadow_page((_sl4mfn));                     \
+    int _xen = !shadow_mode_external(_dom);                             \
     int _i;                                                             \
-    shadow_l4e_t *_sp = map_shadow_page((_sl4mfn));                     \
     ASSERT(mfn_to_shadow_page(_sl4mfn)->type == SH_type_l4_64_shadow);  \
     for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ )              \
     {                                                                   \
-        if ( (!(_xen)) || is_guest_l4_slot(_i) )                        \
+        if ( (!(_xen)) || is_guest_l4_slot(_dom, _i) )                  \
         {                                                               \
             (_sl4e) = _sp + _i;                                         \
             if ( shadow_l4e_get_flags(*(_sl4e)) & _PAGE_PRESENT )       \
@@ -1417,17 +1437,25 @@ void sh_install_xen_entries_in_l4(struct
                                 __PAGE_HYPERVISOR);
     }
 
+    if ( IS_COMPAT(v->domain) )
+    {
+        /* install compat arg xlat entry */
+        sl4e[shadow_l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
+            shadow_l4e_from_mfn(
+                    page_to_mfn(virt_to_page(d->arch.mm_arg_xlat_l3)),
+                    __PAGE_HYPERVISOR);
+    }
+
     sh_unmap_domain_page(sl4e);    
 }
 #endif
 
-#if (CONFIG_PAGING_LEVELS == 3 || defined(CONFIG_COMPAT)) && GUEST_PAGING_LEVELS == 3
+#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
 // For 3-on-3 PV guests, we need to make sure the xen mappings are in
 // place, which means that we need to populate the l2h entry in the l3
 // table.
 
-void sh_install_xen_entries_in_l2h(struct vcpu *v, 
-                                    mfn_t sl2hmfn)
+static void sh_install_xen_entries_in_l2h(struct vcpu *v, mfn_t sl2hmfn)
 {
     struct domain *d = v->domain;
     shadow_l2e_t *sl2e;
@@ -1489,9 +1517,10 @@ void sh_install_xen_entries_in_l2h(struc
 #else
 
     /* Copy the common Xen mappings from the idle domain */
-    memcpy(&sl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
-           &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
-           COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));
+    memcpy(
+        &sl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
+        &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
+        COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));
 
 #endif
     
@@ -1617,8 +1646,11 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
         case SH_type_l4_shadow:
             sh_install_xen_entries_in_l4(v, gmfn, smfn); break;
 #endif
-#if CONFIG_PAGING_LEVELS == 3 && GUEST_PAGING_LEVELS == 3
+#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
         case SH_type_l2h_shadow:
+#ifdef CONFIG_COMPAT
+            ASSERT( IS_COMPAT(v->domain) );
+#endif
             sh_install_xen_entries_in_l2h(v, smfn); break;
 #endif
 #if CONFIG_PAGING_LEVELS == 2 && GUEST_PAGING_LEVELS == 2
@@ -1832,12 +1864,21 @@ static shadow_l2e_t * shadow_get_and_cre
     {
         int r;
         shadow_l3e_t new_sl3e;
+        unsigned int t = SH_type_l2_shadow;
+
+#ifdef CONFIG_COMPAT
+        /* Tag compat L2 containing hypervisor (m2p) mappings */
+        if ( IS_COMPAT(v->domain) &&
+             guest_l4_table_offset(gw->va) == 0 &&
+             guest_l3_table_offset(gw->va) == 3 )
+            t = SH_type_l2h_shadow;
+#endif
         /* No l2 shadow installed: find and install it. */
-        *sl2mfn = get_shadow_status(v, gw->l2mfn, SH_type_l2_shadow);
+        *sl2mfn = get_shadow_status(v, gw->l2mfn, t);
         if ( !mfn_valid(*sl2mfn) ) 
         {
             /* No l2 shadow of this page exists at all: make one. */
-            *sl2mfn = sh_make_shadow(v, gw->l2mfn, SH_type_l2_shadow);
+            *sl2mfn = sh_make_shadow(v, gw->l2mfn, t);
         }
         /* Install the new sl2 table in the sl3e */
         l3e_propagate_from_guest(v, gw->l3e, gw->l3mfn, 
@@ -1958,7 +1999,6 @@ void sh_destroy_l4_shadow(struct vcpu *v
     shadow_l4e_t *sl4e;
     u32 t = mfn_to_shadow_page(smfn)->type;
     mfn_t gmfn, sl4mfn;
-    int xen_mappings;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
                   "%s(%05lx)\n", __func__, mfn_x(smfn));
@@ -1969,9 +2009,8 @@ void sh_destroy_l4_shadow(struct vcpu *v
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
     /* Decrement refcounts of all the old entries */
-    xen_mappings = (!shadow_mode_external(v->domain));
     sl4mfn = smfn; 
-    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, xen_mappings, {
+    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, v->domain, {
         if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT ) 
         {
             sh_put_ref(v, shadow_l4e_get_mfn(*sl4e),
@@ -2019,12 +2058,15 @@ void sh_destroy_l2_shadow(struct vcpu *v
     shadow_l2e_t *sl2e;
     u32 t = mfn_to_shadow_page(smfn)->type;
     mfn_t gmfn, sl2mfn;
-    int xen_mappings;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
                   "%s(%05lx)\n", __func__, mfn_x(smfn));
-    ASSERT(t == SH_type_l2_shadow 
-           || t == SH_type_l2h_pae_shadow);
+
+#if GUEST_PAGING_LEVELS >= 3
+    ASSERT(t == SH_type_l2_shadow || t == SH_type_l2h_shadow);
+#else
+    ASSERT(t == SH_type_l2_shadow);
+#endif
 
     /* Record that the guest page isn't shadowed any more (in this type) */
     gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
@@ -2033,11 +2075,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
 
     /* Decrement refcounts of all the old entries */
     sl2mfn = smfn;
-    xen_mappings = (!shadow_mode_external(v->domain) &&
-                    ((GUEST_PAGING_LEVELS == 2) ||
-                     ((GUEST_PAGING_LEVELS == 3) &&
-                      (t == SH_type_l2h_pae_shadow))));
-    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, xen_mappings, {
+    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
         if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT ) 
             sh_put_ref(v, shadow_l2e_get_mfn(*sl2e),
                         (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT) 
@@ -2138,8 +2176,7 @@ void sh_unhook_32b_mappings(struct vcpu 
 void sh_unhook_32b_mappings(struct vcpu *v, mfn_t sl2mfn)
 {    
     shadow_l2e_t *sl2e;
-    int xen_mappings = !shadow_mode_external(v->domain);
-    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, xen_mappings, {
+    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
         (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
     });
 }
@@ -2150,8 +2187,7 @@ void sh_unhook_pae_mappings(struct vcpu 
 /* Walk a PAE l2 shadow, unhooking entries from all the subshadows */
 {
     shadow_l2e_t *sl2e;
-    int xen_mappings = !shadow_mode_external(v->domain);
-    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, xen_mappings, {
+    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
         (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
     });
 }
@@ -2161,8 +2197,7 @@ void sh_unhook_64b_mappings(struct vcpu 
 void sh_unhook_64b_mappings(struct vcpu *v, mfn_t sl4mfn)
 {
     shadow_l4e_t *sl4e;
-    int xen_mappings = !shadow_mode_external(v->domain);
-    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, xen_mappings, {
+    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, v->domain, {
         (void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
     });
 }
@@ -2208,7 +2243,7 @@ static int validate_gl4e(struct vcpu *v,
     {
         int shadow_index = (((unsigned long)sl4p & ~PAGE_MASK) /
                             sizeof(shadow_l4e_t));
-        int reserved_xen_slot = !is_guest_l4_slot(shadow_index);
+        int reserved_xen_slot = !is_guest_l4_slot(v->domain, shadow_index);
 
         if ( unlikely(reserved_xen_slot) )
         {
@@ -2471,7 +2506,7 @@ sh_map_and_validate_gl2he(struct vcpu *v
 sh_map_and_validate_gl2he(struct vcpu *v, mfn_t gl2mfn,
                            void *new_gl2p, u32 size)
 {
-#if GUEST_PAGING_LEVELS == 3
+#if GUEST_PAGING_LEVELS >= 3
     return sh_map_and_validate(v, gl2mfn, new_gl2p, size, 
                                 SH_type_l2h_shadow, 
                                 shadow_l2_index, 
@@ -3346,7 +3381,12 @@ sh_set_toplevel_shadow(struct vcpu *v,
     
 #if SHADOW_OPTIMIZATIONS & SHOPT_EARLY_UNSHADOW
     /* Once again OK to unhook entries from this table if we see fork/exit */
-    ASSERT(sh_mfn_is_a_page_table(gmfn));
+#if CONFIG_PAGING_LEVELS == 4
+    if ( IS_COMPAT(d) )
+        ASSERT(!sh_mfn_is_a_page_table(gmfn));
+    else
+#endif
+        ASSERT(sh_mfn_is_a_page_table(gmfn));
     mfn_to_page(gmfn)->shadow_flags &= ~SHF_unhooked_mappings;
 #endif
 
@@ -3754,7 +3794,7 @@ void sh_clear_shadow_entry(struct vcpu *
     case SH_type_l1_shadow:
         (void) shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
     case SH_type_l2_shadow:
-#if GUEST_PAGING_LEVELS == 3
+#if GUEST_PAGING_LEVELS >= 3
     case SH_type_l2h_shadow:
 #endif
         (void) shadow_set_l2e(v, ep, shadow_l2e_empty(), smfn); break;
@@ -3774,11 +3814,8 @@ int sh_remove_l1_shadow(struct vcpu *v, 
     shadow_l2e_t *sl2e;
     int done = 0;
     int flags;
-#if GUEST_PAGING_LEVELS != 4
-    int xen_mappings = !shadow_mode_external(v->domain);
-#endif
     
-    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, xen_mappings, 
+    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, v->domain, 
     {
         flags = shadow_l2e_get_flags(*sl2e);
         if ( (flags & _PAGE_PRESENT) 
@@ -3821,9 +3858,9 @@ int sh_remove_l3_shadow(struct vcpu *v, 
 {
     shadow_l4e_t *sl4e;
     int done = 0;
-    int flags, xen_mappings = !shadow_mode_external(v->domain);
+    int flags;
     
-    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, xen_mappings,
+    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, v->domain,
     {
         flags = shadow_l4e_get_flags(*sl4e);
         if ( (flags & _PAGE_PRESENT) 
@@ -4196,14 +4233,11 @@ int sh_audit_l2_table(struct vcpu *v, mf
     gfn_t gfn;
     char *s;
     int done = 0;
-#if GUEST_PAGING_LEVELS != 4
-    int xen_mappings = !shadow_mode_external(v->domain);
-#endif
 
     /* Follow the backpointer */
     gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->backpointer);
     gl2e = gp = sh_map_domain_page(gl2mfn);
-    SHADOW_FOREACH_L2E(sl2mfn, sl2e, &gl2e, done, xen_mappings, {
+    SHADOW_FOREACH_L2E(sl2mfn, sl2e, &gl2e, done, v->domain, {
 
         s = sh_audit_flags(v, 2, guest_l2e_get_flags(*gl2e),
                             shadow_l2e_get_flags(*sl2e));
@@ -4255,10 +4289,11 @@ int sh_audit_l3_table(struct vcpu *v, mf
             gfn = guest_l3e_get_gfn(*gl3e);
             mfn = shadow_l3e_get_mfn(*sl3e);
             gmfn = get_shadow_status(v, audit_gfn_to_mfn(v, gfn, gl3mfn), 
-                                     (GUEST_PAGING_LEVELS == 3 
+                                     ((GUEST_PAGING_LEVELS == 3 ||
+                                       IS_COMPAT(v->domain))
                                       && !shadow_mode_external(v->domain)
                                       && (guest_index(gl3e) % 4) == 3)
-                                     ? SH_type_l2h_pae_shadow
+                                     ? SH_type_l2h_shadow
                                      : SH_type_l2_shadow);
             if ( mfn_x(gmfn) != mfn_x(mfn) )
                 AUDIT_FAIL(3, "bad translation: gfn %" SH_PRI_gfn
@@ -4278,12 +4313,11 @@ int sh_audit_l4_table(struct vcpu *v, mf
     gfn_t gfn;
     char *s;
     int done = 0;
-    int xen_mappings = !shadow_mode_external(v->domain);
 
     /* Follow the backpointer */
     gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->backpointer);
     gl4e = gp = sh_map_domain_page(gl4mfn);
-    SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, xen_mappings,
+    SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, v->domain,
     {
         s = sh_audit_flags(v, 4, guest_l4e_get_flags(*gl4e),
                             shadow_l4e_get_flags(*sl4e));
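
In the multi.c hunks above, shadow_get_and_create_l2e() decides whether a
guest L2 gets the new l2h type by where it sits in the compat address
space. A minimal sketch of that selection, assuming a 64-bit build and
open-coding the page-table offsets that Xen derives via
guest_l4_table_offset()/guest_l3_table_offset():

    #define SKETCH_L4_OFFSET(va) (((va) >> 39) & 0x1ff)
    #define SKETCH_L3_OFFSET(va) (((va) >> 30) & 0x1ff)

    static unsigned int l2_shadow_type(int is_compat, unsigned long va)
    {
        /* A PAE compat guest sits entirely under L4 slot 0, and its
         * fourth L3 slot (index 3) covers the top 1GB of the 32-bit
         * address space, where the Xen/M2P mappings live, so that L2
         * page is shadowed with the l2h type. */
        if ( is_compat &&
             SKETCH_L4_OFFSET(va) == 0 && SKETCH_L3_OFFSET(va) == 3 )
            return 11; /* SH_type_l2h_64_shadow, as numbered in this patch */
        return 10;     /* SH_type_l2_64_shadow */
    }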
diff -r 0b882c911b88 -r 9c2e6f8f3aa7 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Mon Feb 19 16:16:53 2007 +0000
+++ b/xen/arch/x86/mm/shadow/private.h  Mon Feb 19 19:58:07 2007 +0000
@@ -269,12 +269,13 @@ static inline void shadow_check_page_str
 #define SH_type_l1_64_shadow   (8U) /* shadowing a 64-bit L1 page */
 #define SH_type_fl1_64_shadow  (9U) /* L1 shadow for 64-bit 2M superpg */
 #define SH_type_l2_64_shadow  (10U) /* shadowing a 64-bit L2 page */
-#define SH_type_l3_64_shadow  (11U) /* shadowing a 64-bit L3 page */
-#define SH_type_l4_64_shadow  (12U) /* shadowing a 64-bit L4 page */
-#define SH_type_max_shadow    (12U)
-#define SH_type_p2m_table     (13U) /* in use as the p2m table */
-#define SH_type_monitor_table (14U) /* in use as a monitor table */
-#define SH_type_unused        (15U)
+#define SH_type_l2h_64_shadow (11U) /* shadowing a compat PAE L2 high page */
+#define SH_type_l3_64_shadow  (12U) /* shadowing a 64-bit L3 page */
+#define SH_type_l4_64_shadow  (13U) /* shadowing a 64-bit L4 page */
+#define SH_type_max_shadow    (13U)
+#define SH_type_p2m_table     (14U) /* in use as the p2m table */
+#define SH_type_monitor_table (15U) /* in use as a monitor table */
+#define SH_type_unused        (16U)
 
 /* 
  * What counts as a pinnable shadow?
@@ -325,12 +326,13 @@ static inline int sh_type_is_pinnable(st
 #define SHF_L1_64   (1u << SH_type_l1_64_shadow)
 #define SHF_FL1_64  (1u << SH_type_fl1_64_shadow)
 #define SHF_L2_64   (1u << SH_type_l2_64_shadow)
+#define SHF_L2H_64  (1u << SH_type_l2h_64_shadow)
 #define SHF_L3_64   (1u << SH_type_l3_64_shadow)
 #define SHF_L4_64   (1u << SH_type_l4_64_shadow)
 
 #define SHF_32  (SHF_L1_32|SHF_FL1_32|SHF_L2_32)
 #define SHF_PAE (SHF_L1_PAE|SHF_FL1_PAE|SHF_L2_PAE|SHF_L2H_PAE)
-#define SHF_64  (SHF_L1_64|SHF_FL1_64|SHF_L2_64|SHF_L3_64|SHF_L4_64)
+#define SHF_64  (SHF_L1_64|SHF_FL1_64|SHF_L2_64|SHF_L2H_64|SHF_L3_64|SHF_L4_64)
 
 /* Used for hysteresis when automatically unhooking mappings on fork/exit */
 #define SHF_unhooked_mappings (1u<<31)
@@ -367,7 +369,6 @@ void shadow_unhook_mappings(struct vcpu 
 
 /* Install the xen mappings in various flavours of shadow */
 void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn);
-void sh_install_xen_entries_in_l2h(struct vcpu *v, mfn_t sl2hmfn);
 void sh_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn);
 
 /* Update the shadows in response to a pagetable write from Xen */
diff -r 0b882c911b88 -r 9c2e6f8f3aa7 xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Mon Feb 19 16:16:53 2007 +0000
+++ b/xen/arch/x86/mm/shadow/types.h    Mon Feb 19 19:58:07 2007 +0000
@@ -389,6 +389,7 @@ static inline guest_l4e_t guest_l4e_from
 #define SH_type_l1_shadow  SH_type_l1_64_shadow
 #define SH_type_fl1_shadow SH_type_fl1_64_shadow
 #define SH_type_l2_shadow  SH_type_l2_64_shadow
+#define SH_type_l2h_shadow SH_type_l2h_64_shadow
 #define SH_type_l3_shadow  SH_type_l3_64_shadow
 #define SH_type_l4_shadow  SH_type_l4_64_shadow
 #endif
diff -r 0b882c911b88 -r 9c2e6f8f3aa7 xen/include/asm-x86/x86_64/page.h
--- a/xen/include/asm-x86/x86_64/page.h Mon Feb 19 16:16:53 2007 +0000
+++ b/xen/include/asm-x86/x86_64/page.h Mon Feb 19 19:58:07 2007 +0000
@@ -59,9 +59,11 @@ typedef l4_pgentry_t root_pgentry_t;
       !((_t) & PGT_pae_xen_l2) ||                      \
       ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) )
 #define is_guest_l3_slot(_s) (1)
-#define is_guest_l4_slot(_s)                   \
-    (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) || \
-     ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT))
+#define is_guest_l4_slot(_d, _s)                    \
+    ( IS_COMPAT(_d)                                 \
+      ? ((_s) == 0)                                 \
+      : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) ||  \
+         ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))
 
 #define root_get_pfn              l4e_get_pfn
 #define root_get_flags            l4e_get_flags
@@ -96,7 +98,7 @@ typedef l4_pgentry_t root_pgentry_t;
 #define L3_DISALLOW_MASK (BASE_DISALLOW_MASK)
 #define L4_DISALLOW_MASK (BASE_DISALLOW_MASK)
 
-#define COMPAT_L3_DISALLOW_MASK 0xFFFFF1E6U
+#define COMPAT_L3_DISALLOW_MASK L3_DISALLOW_MASK
 
 #define PAGE_HYPERVISOR         (__PAGE_HYPERVISOR         | _PAGE_GLOBAL)
 #define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)
