
[Xen-devel] [PATCH 2/5] x86-64: fold shadow_page_info fields into page_info



... combining the list entry members of both structures and removing
the artificial 'mbz' member (shadow code must keep the real underlying
member, 'count_info', at zero for the lifetime of a page's use as a
shadow).

This also fixes a latent issue: u.inuse._domain was not explicitly
cleared before shadow pages were returned to the domain heap - it just
so happened that this member turned out to be zero in all (normal?)
cases when a shadow page ended its life (but there were neither build-
nor run-time assertions that this would actually be the case). The bug
got exposed by a subsequent patch changing the order of fields in
struct page_info.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
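
For reference, the shadow metadata ends up overlaid on struct page_info
roughly as sketched below (simplified; the authoritative definition is
the asm-x86/mm.h hunk further down, including the __mfn_t type and the
bit widths introduced there):

    struct page_info {
        union {
            /* Free shadows: freelist link; pinned shadows: pinned list. */
            struct page_list_entry list;
            /* Non-pinnable shadows: the higher entry pointing at us. */
            paddr_t up;
        };
        /* Must stay zero for the whole life of a page as a shadow. */
        unsigned long count_info;
        union {
            /* ... ordinary 'inuse' and 'free' variants ... */
            struct {                    /* count_info == 0 while in here */
                unsigned long type:5;   /* what kind of shadow? */
                unsigned long pinned:1; /* is the shadow pinned? */
                unsigned long count:26; /* reference count */
                union {
                    unsigned int back;  /* in use: GMFN we shadow */
                    unsigned int order; /* free: freelist order */
                };
            } sh;
        } u;
        /* (next_shadow shares the tlbflush_timestamp/shadow_flags union) */
    };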

--- 2009-01-30.orig/xen/arch/x86/mm/shadow/common.c     2009-01-30 10:13:53.000000000 +0100
+++ 2009-01-30/xen/arch/x86/mm/shadow/common.c  2009-01-30 10:17:04.000000000 +0100
@@ -48,9 +48,9 @@ void shadow_domain_init(struct domain *d
     int i;
     shadow_lock_init(d);
     for ( i = 0; i <= SHADOW_MAX_ORDER; i++ )
-        INIT_LIST_HEAD(&d->arch.paging.shadow.freelists[i]);
+        INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelists[i]);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.p2m_freelist);
-    INIT_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
+    INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
 
     /* Use shadow pagetables for log-dirty support */
     paging_log_dirty_init(d, shadow_enable_log_dirty, 
@@ -1291,9 +1291,9 @@ static inline int space_is_available(
     for ( ; order <= shadow_max_order(d); ++order )
     {
         unsigned int n = count;
-        const struct list_head *p;
+        const struct shadow_page_info *sp;
 
-        list_for_each ( p, &d->arch.paging.shadow.freelists[order] )
+        page_list_for_each ( sp, &d->arch.paging.shadow.freelists[order] )
             if ( --n == 0 )
                 return 1;
         count = (count + 1) >> 1;
@@ -1307,7 +1307,7 @@ static inline int space_is_available(
 static void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
 {
     struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
-    switch ( sp->type )
+    switch ( sp->u.sh.type )
     {
     case SH_type_l2_32_shadow:
         SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, 2)(v,smfn);
@@ -1322,7 +1322,7 @@ static void shadow_unhook_mappings(struc
         break;
 #endif
     default:
-        SHADOW_ERROR("top-level shadow has bad type %08x\n", sp->type);
+        SHADOW_ERROR("top-level shadow has bad type %08x\n", sp->u.sh.type);
         BUG();
     }
 }
@@ -1334,7 +1334,7 @@ static inline void trace_shadow_prealloc
         /* Convert smfn to gfn */
         unsigned long gfn;
         ASSERT(mfn_valid(smfn));
-        gfn = mfn_to_gfn(d, _mfn(mfn_to_shadow_page(smfn)->backpointer));
+        gfn = mfn_to_gfn(d, _mfn(mfn_to_shadow_page(smfn)->u.sh.back));
         __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/,
                     sizeof(gfn), (unsigned char*)&gfn);
     }
@@ -1350,8 +1350,7 @@ static void _shadow_prealloc(
     /* Need a vpcu for calling unpins; for now, since we don't have
      * per-vcpu shadows, any will do */
     struct vcpu *v, *v2;
-    struct list_head *l, *t;
-    struct shadow_page_info *sp;
+    struct shadow_page_info *sp, *t;
     mfn_t smfn;
     int i;
 
@@ -1365,9 +1364,8 @@ static void _shadow_prealloc(
 
     /* Stage one: walk the list of pinned pages, unpinning them */
     perfc_incr(shadow_prealloc_1);
-    list_for_each_backwards_safe(l, t, &d->arch.paging.shadow.pinned_shadows)
+    page_list_for_each_safe_reverse(sp, t, &d->arch.paging.shadow.pinned_shadows)
     {
-        sp = list_entry(l, struct shadow_page_info, list);
         smfn = shadow_page_to_mfn(sp);
 
         /* Unpin this top-level shadow */
@@ -1427,8 +1425,7 @@ void shadow_prealloc(struct domain *d, u
  * this domain's shadows */
 static void shadow_blow_tables(struct domain *d) 
 {
-    struct list_head *l, *t;
-    struct shadow_page_info *sp;
+    struct shadow_page_info *sp, *t;
     struct vcpu *v = d->vcpu[0];
     mfn_t smfn;
     int i;
@@ -1436,9 +1433,8 @@ static void shadow_blow_tables(struct do
     ASSERT(v != NULL);
 
     /* Pass one: unpin all pinned pages */
-    list_for_each_backwards_safe(l,t, &d->arch.paging.shadow.pinned_shadows)
+    page_list_for_each_safe_reverse(sp, t, &d->arch.paging.shadow.pinned_shadows)
     {
-        sp = list_entry(l, struct shadow_page_info, list);
         smfn = shadow_page_to_mfn(sp);
         sh_unpin(v, smfn);
     }
@@ -1515,7 +1511,7 @@ mfn_t shadow_alloc(struct domain *d,  
 
     /* Find smallest order which can satisfy the request. */
     for ( i = order; i <= SHADOW_MAX_ORDER; i++ )
-        if ( !list_empty(&d->arch.paging.shadow.freelists[i]) )
+        if ( (sp = page_list_remove_head(&d->arch.paging.shadow.freelists[i])) )
             goto found;
     
     /* If we get here, we failed to allocate. This should never happen.
@@ -1526,16 +1522,12 @@ mfn_t shadow_alloc(struct domain *d,  
     BUG();
 
  found:
-    sp = list_entry(d->arch.paging.shadow.freelists[i].next, 
-                    struct shadow_page_info, list);
-    list_del(&sp->list);
-            
     /* We may have to halve the chunk a number of times. */
     while ( i != order )
     {
         i--;
-        sp->order = i;
-        list_add_tail(&sp->list, &d->arch.paging.shadow.freelists[i]);
+        sp->u.sh.order = i;
+        page_list_add_tail(sp, &d->arch.paging.shadow.freelists[i]);
         sp += 1 << i;
     }
     d->arch.paging.shadow.free_pages -= 1 << order;
@@ -1557,11 +1549,11 @@ mfn_t shadow_alloc(struct domain *d,  
         ASSERT(p != NULL);
         clear_page(p);
         sh_unmap_domain_page(p);
-        INIT_LIST_HEAD(&sp[i].list);
-        sp[i].type = shadow_type;
-        sp[i].pinned = 0;
-        sp[i].count = 0;
-        sp[i].backpointer = backpointer;
+        INIT_PAGE_LIST_ENTRY(&sp[i].list);
+        sp[i].u.sh.type = shadow_type;
+        sp[i].u.sh.pinned = 0;
+        sp[i].u.sh.count = 0;
+        sp[i].u.sh.back = backpointer;
         sp[i].next_shadow = NULL;
         perfc_incr(shadow_alloc_count);
     }
@@ -1581,7 +1573,7 @@ void shadow_free(struct domain *d, mfn_t
     ASSERT(shadow_locked_by_me(d));
     perfc_incr(shadow_free);
 
-    shadow_type = sp->type;
+    shadow_type = sp->u.sh.type;
     ASSERT(shadow_type != SH_type_none);
     ASSERT(shadow_type != SH_type_p2m_table);
     order = shadow_order(shadow_type);
@@ -1605,7 +1597,7 @@ void shadow_free(struct domain *d, mfn_t
         }
 #endif
         /* Strip out the type: this is now a free shadow page */
-        sp[i].type = 0;
+        sp[i].u.sh.type = 0;
         /* Remember the TLB timestamp so we will know whether to flush 
          * TLBs when we reuse the page.  Because the destructors leave the
          * contents of the pages in place, we can delay TLB flushes until
@@ -1620,20 +1612,22 @@ void shadow_free(struct domain *d, mfn_t
         mask = 1 << order;
         if ( (mfn_x(shadow_page_to_mfn(sp)) & mask) ) {
             /* Merge with predecessor block? */
-            if ( ((sp-mask)->type != PGT_none) || ((sp-mask)->order != order) )
+            if ( ((sp-mask)->u.sh.type != PGT_none) ||
+                 ((sp-mask)->u.sh.order != order) )
                 break;
-            list_del(&(sp-mask)->list);
             sp -= mask;
+            page_list_del(sp, &d->arch.paging.shadow.freelists[order]);
         } else {
             /* Merge with successor block? */
-            if ( ((sp+mask)->type != PGT_none) || ((sp+mask)->order != order) )
+            if ( ((sp+mask)->u.sh.type != PGT_none) ||
+                 ((sp+mask)->u.sh.order != order) )
                 break;
-            list_del(&(sp+mask)->list);
+            page_list_del(sp + mask, &d->arch.paging.shadow.freelists[order]);
         }
     }
 
-    sp->order = order;
-    list_add_tail(&sp->list, &d->arch.paging.shadow.freelists[order]);
+    sp->u.sh.order = order;
+    page_list_add_tail(sp, &d->arch.paging.shadow.freelists[order]);
 }
 
 /* Divert some memory from the pool to be used by the p2m mapping.
@@ -1810,23 +1804,26 @@ static unsigned int sh_set_allocation(st
             d->arch.paging.shadow.total_pages += 1 << order;
             for ( j = 0; j < 1U << order; j++ )
             {
-                sp[j].type = 0;  
-                sp[j].pinned = 0;
-                sp[j].count = 0;
-                sp[j].mbz = 0;
+                sp[j].u.sh.type = 0;
+                sp[j].u.sh.pinned = 0;
+                sp[j].u.sh.count = 0;
                 sp[j].tlbflush_timestamp = 0; /* Not in any TLB */
             }
-            sp->order = order;
-            list_add_tail(&sp->list, &d->arch.paging.shadow.freelists[order]);
+            sp->u.sh.order = order;
+            page_list_add_tail(sp, &d->arch.paging.shadow.freelists[order]);
         } 
         else if ( d->arch.paging.shadow.total_pages > pages ) 
         {
             /* Need to return memory to domheap */
             _shadow_prealloc(d, order, 1);
-            ASSERT(!list_empty(&d->arch.paging.shadow.freelists[order]));
-            sp = list_entry(d->arch.paging.shadow.freelists[order].next,
-                            struct shadow_page_info, list);
-            list_del(&sp->list);
+            sp = page_list_remove_head(&d->arch.paging.shadow.freelists[order]);
+            ASSERT(sp);
+            /*
+             * The pages were allocated anonymously, but the owner field
+             * gets overwritten normally, so need to clear it here.
+             */
+            for ( j = 0; j < 1U << order; j++ )
+                page_set_owner(&((struct page_info *)sp)[j], NULL);
             d->arch.paging.shadow.free_pages -= 1 << order;
             d->arch.paging.shadow.total_pages -= 1 << order;
             free_domheap_pages((struct page_info *)sp, order);
@@ -1886,37 +1883,38 @@ static void sh_hash_audit_bucket(struct 
     while ( sp )
     {
         /* Not a shadow? */
-        BUG_ON( sp->mbz != 0 );
+        BUG_ON( sp->count_info != 0 );
         /* Bogus type? */
-        BUG_ON( sp->type == 0 ); 
-        BUG_ON( sp->type > SH_type_max_shadow );
+        BUG_ON( sp->u.sh.type == 0 );
+        BUG_ON( sp->u.sh.type > SH_type_max_shadow );
         /* Wrong bucket? */
-        BUG_ON( sh_hash(sp->backpointer, sp->type) != bucket ); 
+        BUG_ON( sh_hash(sp->u.sh.back, sp->u.sh.type) != bucket );
         /* Duplicate entry? */
         for ( x = sp->next_shadow; x; x = x->next_shadow )
-            BUG_ON( x->backpointer == sp->backpointer && x->type == sp->type );
+            BUG_ON( x->u.sh.back == sp->u.sh.back &&
+                    x->u.sh.type == sp->u.sh.type );
         /* Follow the backpointer to the guest pagetable */
-        if ( sp->type != SH_type_fl1_32_shadow
-             && sp->type != SH_type_fl1_pae_shadow
-             && sp->type != SH_type_fl1_64_shadow )
+        if ( sp->u.sh.type != SH_type_fl1_32_shadow
+             && sp->u.sh.type != SH_type_fl1_pae_shadow
+             && sp->u.sh.type != SH_type_fl1_64_shadow )
         {
-            struct page_info *gpg = mfn_to_page(_mfn(sp->backpointer));
+            struct page_info *gpg = mfn_to_page(_mfn(sp->u.sh.back));
             /* Bad shadow flags on guest page? */
-            BUG_ON( !(gpg->shadow_flags & (1<<sp->type)) );
+            BUG_ON( !(gpg->shadow_flags & (1<<sp->u.sh.type)) );
             /* Bad type count on guest page? */
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
-            if ( sp->type == SH_type_l1_32_shadow
-                 || sp->type == SH_type_l1_pae_shadow
-                 || sp->type == SH_type_l1_64_shadow )
+            if ( sp->u.sh.type == SH_type_l1_32_shadow
+                 || sp->u.sh.type == SH_type_l1_pae_shadow
+                 || sp->u.sh.type == SH_type_l1_64_shadow )
             {
                if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page
                      && (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
                 {
                     if ( !page_is_out_of_sync(gpg) )
                     {
-                        SHADOW_ERROR("MFN %#lx shadowed (by %#"PRI_mfn")"
+                        SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
                                      " and not OOS but has typecount %#lx\n",
-                                     sp->backpointer, 
+                                     sp->u.sh.back,
                                      mfn_x(shadow_page_to_mfn(sp)), 
                                      gpg->u.inuse.type_info);
                         BUG();
@@ -1928,9 +1926,9 @@ static void sh_hash_audit_bucket(struct 
             if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page 
                  && (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
             {
-                SHADOW_ERROR("MFN %#lx shadowed (by %#"PRI_mfn")"
+                SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
                              " but has typecount %#lx\n",
-                             sp->backpointer, mfn_x(shadow_page_to_mfn(sp)), 
+                             sp->u.sh.back, mfn_x(shadow_page_to_mfn(sp)),
                              gpg->u.inuse.type_info);
                 BUG();
             }
@@ -2016,7 +2014,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
     prev = NULL;
     while(sp)
     {
-        if ( sp->backpointer == n && sp->type == t )
+        if ( sp->u.sh.back == n && sp->u.sh.type == t )
         {
             /* Pull-to-front if 'sp' isn't already the head item */
             if ( unlikely(sp != d->arch.paging.shadow.hash_table[key]) )
@@ -2148,12 +2146,12 @@ static void hash_foreach(struct vcpu *v,
          * deleted anything from the hash (lookups are OK, though). */
         for ( x = d->arch.paging.shadow.hash_table[i]; x; x = x->next_shadow )
         {
-            if ( callback_mask & (1 << x->type) ) 
+            if ( callback_mask & (1 << x->u.sh.type) )
             {
-                ASSERT(x->type <= 15);
-                ASSERT(callbacks[x->type] != NULL);
-                done = callbacks[x->type](v, shadow_page_to_mfn(x), 
-                                          callback_mfn);
+                ASSERT(x->u.sh.type <= 15);
+                ASSERT(callbacks[x->u.sh.type] != NULL);
+                done = callbacks[x->u.sh.type](v, shadow_page_to_mfn(x),
+                                               callback_mfn);
                 if ( done ) break;
             }
         }
@@ -2171,7 +2169,7 @@ static void hash_foreach(struct vcpu *v,
 void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
 {
     struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
-    unsigned int t = sp->type;
+    unsigned int t = sp->u.sh.type;
 
 
     SHADOW_PRINTK("smfn=%#lx\n", mfn_x(smfn));
@@ -2183,7 +2181,7 @@ void sh_destroy_shadow(struct vcpu *v, m
            t == SH_type_fl1_64_shadow  || 
            t == SH_type_monitor_table  || 
            (is_pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
-           (page_get_owner(mfn_to_page(_mfn(sp->backpointer))) 
+           (page_get_owner(mfn_to_page(_mfn(sp->u.sh.back)))
             == v->domain)); 
 
     /* The down-shifts here are so that the switch statement is on nice
@@ -2435,7 +2433,7 @@ int sh_remove_write_access(struct vcpu *
     {
         unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask);
         mfn_t last_smfn = _mfn(v->arch.paging.shadow.last_writeable_pte_smfn);
-        int shtype = mfn_to_shadow_page(last_smfn)->type;
+        int shtype = mfn_to_shadow_page(last_smfn)->u.sh.type;
 
         if ( callbacks[shtype] ) 
             callbacks[shtype](v, last_smfn, gmfn);
@@ -2483,20 +2481,20 @@ int sh_remove_write_access_from_sl1p(str
     ASSERT(mfn_valid(smfn));
     ASSERT(mfn_valid(gmfn));
     
-    if ( sp->type == SH_type_l1_32_shadow
-         || sp->type == SH_type_fl1_32_shadow )
+    if ( sp->u.sh.type == SH_type_l1_32_shadow
+         || sp->u.sh.type == SH_type_fl1_32_shadow )
     {
         return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,2)
             (v, gmfn, smfn, off);
     }
 #if CONFIG_PAGING_LEVELS >= 3
-    else if ( sp->type == SH_type_l1_pae_shadow
-              || sp->type == SH_type_fl1_pae_shadow )
+    else if ( sp->u.sh.type == SH_type_l1_pae_shadow
+              || sp->u.sh.type == SH_type_fl1_pae_shadow )
         return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,3)
             (v, gmfn, smfn, off);
 #if CONFIG_PAGING_LEVELS >= 4
-    else if ( sp->type == SH_type_l1_64_shadow
-              || sp->type == SH_type_fl1_64_shadow )
+    else if ( sp->u.sh.type == SH_type_l1_64_shadow
+              || sp->u.sh.type == SH_type_fl1_64_shadow )
         return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,4)
             (v, gmfn, smfn, off);
 #endif
@@ -2603,12 +2601,12 @@ static int sh_remove_shadow_via_pointer(
     void *vaddr;
     int rc;
 
-    ASSERT(sp->type > 0);
-    ASSERT(sp->type < SH_type_max_shadow);
-    ASSERT(sp->type != SH_type_l2_32_shadow);
-    ASSERT(sp->type != SH_type_l2_pae_shadow);
-    ASSERT(sp->type != SH_type_l2h_pae_shadow);
-    ASSERT(sp->type != SH_type_l4_64_shadow);
+    ASSERT(sp->u.sh.type > 0);
+    ASSERT(sp->u.sh.type < SH_type_max_shadow);
+    ASSERT(sp->u.sh.type != SH_type_l2_32_shadow);
+    ASSERT(sp->u.sh.type != SH_type_l2_pae_shadow);
+    ASSERT(sp->u.sh.type != SH_type_l2h_pae_shadow);
+    ASSERT(sp->u.sh.type != SH_type_l4_64_shadow);
     
     if (sp->up == 0) return 0;
     pmfn = _mfn(sp->up >> PAGE_SHIFT);
@@ -2619,10 +2617,10 @@ static int sh_remove_shadow_via_pointer(
     ASSERT(l1e_get_pfn(*(l1_pgentry_t *)vaddr) == mfn_x(smfn));
     
     /* Is this the only reference to this shadow? */
-    rc = (sp->count == 1) ? 1 : 0;
+    rc = (sp->u.sh.count == 1) ? 1 : 0;
 
     /* Blank the offending entry */
-    switch (sp->type) 
+    switch (sp->u.sh.type)
     {
     case SH_type_l1_32_shadow:
     case SH_type_l2_32_shadow:
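
A note on the sh_set_allocation() change above: with the folded layout,
u.sh shares storage with u.inuse, so by the time a shadow page is torn
down, u.inuse._domain holds whatever the overlapping shadow fields last
contained - previously zero by accident rather than by construction.
Clearing the owner restores the state free_domheap_pages() expects of
anonymously allocated pages. A condensed view of the fixed path (same
names as in the hunk; the second ASSERT is illustrative only, not part
of the patch):

    sp = page_list_remove_head(&d->arch.paging.shadow.freelists[order]);
    ASSERT(sp);
    for ( j = 0; j < 1U << order; j++ )
    {
        /* Wipe the aliased owner field before the page leaves shadow use. */
        page_set_owner(&((struct page_info *)sp)[j], NULL);
        ASSERT(page_get_owner(&((struct page_info *)sp)[j]) == NULL);
    }
    free_domheap_pages((struct page_info *)sp, order);
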
--- 2009-01-30.orig/xen/arch/x86/mm/shadow/multi.c      2009-01-30 08:27:02.000000000 +0100
+++ 2009-01-30/xen/arch/x86/mm/shadow/multi.c   2009-01-30 10:14:47.000000000 +0100
@@ -974,12 +974,12 @@ static int shadow_set_l2e(struct vcpu *v
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
         {
             struct shadow_page_info *sp = mfn_to_shadow_page(sl1mfn);
-            mfn_t gl1mfn = _mfn(sp->backpointer);
+            mfn_t gl1mfn = _mfn(sp->u.sh.back);
 
             /* If the shadow is a fl1 then the backpointer contains
                the GFN instead of the GMFN, and it's definitely not
                OOS. */
-            if ( (sp->type != SH_type_fl1_shadow) && mfn_valid(gl1mfn)
+            if ( (sp->u.sh.type != SH_type_fl1_shadow) && mfn_valid(gl1mfn)
                  && mfn_is_out_of_sync(gl1mfn) )
                 sh_resync(v, gl1mfn);
         }
@@ -1194,8 +1194,8 @@ static inline void increment_ptr_to_gues
 do {                                                                    \
     int _i;                                                             \
     shadow_l1e_t *_sp = sh_map_domain_page((_sl1mfn));                  \
-    ASSERT(mfn_to_shadow_page(_sl1mfn)->type == SH_type_l1_shadow       \
-           || mfn_to_shadow_page(_sl1mfn)->type == SH_type_fl1_shadow); \
+    ASSERT(mfn_to_shadow_page(_sl1mfn)->u.sh.type == SH_type_l1_shadow  \
+           || mfn_to_shadow_page(_sl1mfn)->u.sh.type == SH_type_fl1_shadow);\
     for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ )              \
     {                                                                   \
         (_sl1e) = _sp + _i;                                             \
@@ -1232,7 +1232,7 @@ do {                                    
 do {                                                                      \
     int _i, _j, __done = 0;                                               \
     int _xen = !shadow_mode_external(_dom);                               \
-    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);    \
+    ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow);\
     for ( _j = 0; _j < 4 && !__done; _j++ )                               \
     {                                                                     \
         shadow_l2e_t *_sp = sh_map_domain_page(_sl2mfn);                  \
@@ -1260,11 +1260,11 @@ do {                                    
     int _i;                                                                \
     int _xen = !shadow_mode_external(_dom);                                \
     shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                     \
-    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_pae_shadow      \
-           || mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_pae_shadow);\
+    ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow \
+           || mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow);\
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
         if ( (!(_xen))                                                     \
-             || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_pae_shadow\
+             || mfn_to_shadow_page(_sl2mfn)->u.sh.type != SH_type_l2h_pae_shadow\
              || ((_i + (3 * SHADOW_L2_PAGETABLE_ENTRIES))                  \
                  < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) ) \
         {                                                                  \
@@ -1285,13 +1285,13 @@ do {                                    
     int _i;                                                                 \
     int _xen = !shadow_mode_external(_dom);                                 \
     shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                      \
-    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow ||     \
-           mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_64_shadow);     \
+    ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_64_shadow ||\
+           mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2h_64_shadow);\
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
     {                                                                       \
         if ( (!(_xen))                                                      \
              || !is_pv_32on64_domain(_dom)                                  \
-             || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_64_shadow  \
+             || mfn_to_shadow_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow\
              || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
         {                                                                   \
             (_sl2e) = _sp + _i;                                             \
@@ -1313,7 +1313,7 @@ do {                                    
 do {                                                                    \
     int _i;                                                             \
     shadow_l3e_t *_sp = sh_map_domain_page((_sl3mfn));                  \
-    ASSERT(mfn_to_shadow_page(_sl3mfn)->type == SH_type_l3_64_shadow);  \
+    ASSERT(mfn_to_shadow_page(_sl3mfn)->u.sh.type == SH_type_l3_64_shadow);\
     for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ )              \
     {                                                                   \
         (_sl3e) = _sp + _i;                                             \
@@ -1331,7 +1331,7 @@ do {                                    
     shadow_l4e_t *_sp = sh_map_domain_page((_sl4mfn));                  \
     int _xen = !shadow_mode_external(_dom);                             \
     int _i;                                                             \
-    ASSERT(mfn_to_shadow_page(_sl4mfn)->type == SH_type_l4_64_shadow);  \
+    ASSERT(mfn_to_shadow_page(_sl4mfn)->u.sh.type == SH_type_l4_64_shadow);\
     for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ )              \
     {                                                                   \
         if ( (!(_xen)) || is_guest_l4_slot(_dom, _i) )                  \
@@ -1519,14 +1519,12 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
          * of them, decide that this isn't an old linux guest, and stop
          * pinning l3es.  This is not very quick but it doesn't happen
          * very often. */
-        struct list_head *l, *t;
-        struct shadow_page_info *sp;
+        struct shadow_page_info *sp, *t;
         struct vcpu *v2;
         int l4count = 0, vcpus = 0;
-        list_for_each(l, &v->domain->arch.paging.shadow.pinned_shadows)
+        page_list_for_each(sp, &v->domain->arch.paging.shadow.pinned_shadows)
         {
-            sp = list_entry(l, struct shadow_page_info, list);
-            if ( sp->type == SH_type_l4_64_shadow )
+            if ( sp->u.sh.type == SH_type_l4_64_shadow )
                 l4count++;
         }
         for_each_vcpu ( v->domain, v2 ) 
@@ -1534,10 +1532,9 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
         if ( l4count > 2 * vcpus ) 
         {
             /* Unpin all the pinned l3 tables, and don't pin any more. */
-            list_for_each_safe(l, t, &v->domain->arch.paging.shadow.pinned_shadows)
+            page_list_for_each_safe(sp, t, &v->domain->arch.paging.shadow.pinned_shadows)
             {
-                sp = list_entry(l, struct shadow_page_info, list);
-                if ( sp->type == SH_type_l3_64_shadow )
+                if ( sp->u.sh.type == SH_type_l3_64_shadow )
                     sh_unpin(v, shadow_page_to_mfn(sp));
             }
             v->domain->arch.paging.shadow.opt_flags &= ~SHOPT_LINUX_L3_TOPLEVEL;
@@ -1921,7 +1918,7 @@ static shadow_l1e_t * shadow_get_and_cre
 void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
 {
     shadow_l4e_t *sl4e;
-    u32 t = mfn_to_shadow_page(smfn)->type;
+    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
     mfn_t gmfn, sl4mfn;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
@@ -1929,7 +1926,7 @@ void sh_destroy_l4_shadow(struct vcpu *v
     ASSERT(t == SH_type_l4_shadow);
 
     /* Record that the guest page isn't shadowed any more (in this type) */
-    gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
+    gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
     /* Decrement refcounts of all the old entries */
@@ -1950,7 +1947,7 @@ void sh_destroy_l4_shadow(struct vcpu *v
 void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
 {
     shadow_l3e_t *sl3e;
-    u32 t = mfn_to_shadow_page(smfn)->type;
+    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
     mfn_t gmfn, sl3mfn;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
@@ -1958,7 +1955,7 @@ void sh_destroy_l3_shadow(struct vcpu *v
     ASSERT(t == SH_type_l3_shadow);
 
     /* Record that the guest page isn't shadowed any more (in this type) */
-    gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
+    gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
 
@@ -1980,7 +1977,7 @@ void sh_destroy_l3_shadow(struct vcpu *v
 void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
 {
     shadow_l2e_t *sl2e;
-    u32 t = mfn_to_shadow_page(smfn)->type;
+    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
     mfn_t gmfn, sl2mfn;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
@@ -1993,7 +1990,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
 #endif
 
     /* Record that the guest page isn't shadowed any more (in this type) */
-    gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
+    gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
 
@@ -2014,7 +2011,7 @@ void sh_destroy_l1_shadow(struct vcpu *v
 {
     struct domain *d = v->domain;
     shadow_l1e_t *sl1e;
-    u32 t = mfn_to_shadow_page(smfn)->type;
+    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
                   "%s(%05lx)\n", __func__, mfn_x(smfn));
@@ -2023,12 +2020,12 @@ void sh_destroy_l1_shadow(struct vcpu *v
     /* Record that the guest page isn't shadowed any more (in this type) */
     if ( t == SH_type_fl1_shadow )
     {
-        gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->backpointer);
+        gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->u.sh.back);
         delete_fl1_shadow_status(v, gfn, smfn);
     }
     else 
     {
-        mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
+        mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->u.sh.back);
         delete_shadow_status(v, gmfn, t, smfn);
         shadow_demote(v, gmfn, t);
     }
@@ -2054,7 +2051,7 @@ void sh_destroy_l1_shadow(struct vcpu *v
 void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
 {
     struct domain *d = v->domain;
-    ASSERT(mfn_to_shadow_page(mmfn)->type == SH_type_monitor_table);
+    ASSERT(mfn_to_shadow_page(mmfn)->u.sh.type == SH_type_monitor_table);
 
 #if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS != 4)
     {
@@ -2298,7 +2295,7 @@ static int validate_gl2e(struct vcpu *v,
 
 #if SHADOW_PAGING_LEVELS == 3
         reserved_xen_slot = 
-            ((mfn_to_shadow_page(sl2mfn)->type == SH_type_l2h_pae_shadow) &&
+            ((mfn_to_shadow_page(sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow) &&
              (shadow_index 
               >= (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1))));
 #else /* SHADOW_PAGING_LEVELS == 2 */
@@ -2352,7 +2349,7 @@ static int validate_gl1e(struct vcpu *v,
     result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
-    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->backpointer);
+    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
     if ( mfn_valid(gl1mfn) 
          && mfn_is_out_of_sync(gl1mfn) )
     {
@@ -2437,7 +2434,7 @@ int sh_safe_not_to_sync(struct vcpu *v, 
     
     /* Up to l2 */
     sp = mfn_to_shadow_page(smfn);
-    if ( sp->count != 1 || !sp->up )
+    if ( sp->u.sh.count != 1 || !sp->up )
         return 0;
     smfn = _mfn(sp->up >> PAGE_SHIFT);
     ASSERT(mfn_valid(smfn));
@@ -2445,14 +2442,14 @@ int sh_safe_not_to_sync(struct vcpu *v, 
 #if (SHADOW_PAGING_LEVELS == 4) 
     /* up to l3 */
     sp = mfn_to_shadow_page(smfn);
-    if ( sp->count != 1 || !sp->up )
+    if ( sp->u.sh.count != 1 || !sp->up )
         return 0;
     smfn = _mfn(sp->up >> PAGE_SHIFT);
     ASSERT(mfn_valid(smfn));
 
     /* up to l4 */
     sp = mfn_to_shadow_page(smfn);
-    if ( sp->count != 1 
+    if ( sp->u.sh.count != 1
          || sh_type_is_pinnable(v, SH_type_l3_64_shadow) || !sp->up )
         return 0;
     smfn = _mfn(sp->up >> PAGE_SHIFT);
@@ -2971,7 +2968,7 @@ static int sh_page_fault(struct vcpu *v,
                                        sizeof(sl2e)) != 0)
                      || !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT)
                      || !mfn_valid(gl1mfn = _mfn(mfn_to_shadow_page(
-                                      shadow_l2e_get_mfn(sl2e))->backpointer))
+                                      shadow_l2e_get_mfn(sl2e))->u.sh.back))
                      || unlikely(mfn_is_out_of_sync(gl1mfn)) )
                {
                    /* Hit the slow path as if there had been no 
@@ -3523,7 +3520,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
     // easier than invalidating all of the individual 4K pages).
     //
     sl1mfn = shadow_l2e_get_mfn(sl2e);
-    if ( mfn_to_shadow_page(sl1mfn)->type
+    if ( mfn_to_shadow_page(sl1mfn)->u.sh.type
          == SH_type_fl1_shadow )
     {
         flush_tlb_local();
@@ -3533,7 +3530,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
     /* Check to see if the SL1 is out of sync. */
     {
-        mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->backpointer);
+        mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
         struct page_info *pg = mfn_to_page(gl1mfn);
         if ( mfn_valid(gl1mfn) 
              && page_is_out_of_sync(pg) )
@@ -3563,7 +3560,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
             }
 
             sl1mfn = shadow_l2e_get_mfn(sl2e);
-            gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->backpointer);
+            gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
             pg = mfn_to_page(gl1mfn);
             
             if ( likely(sh_mfn_is_a_page_table(gl1mfn)
@@ -3968,7 +3965,7 @@ sh_set_toplevel_shadow(struct vcpu *v, 
         /* Need to repin the old toplevel shadow if it's been unpinned
          * by shadow_prealloc(): in PV mode we're still running on this
          * shadow and it's not safe to free it yet. */
-        if ( !mfn_to_shadow_page(old_smfn)->pinned && !sh_pin(v, old_smfn) )
+        if ( !mfn_to_shadow_page(old_smfn)->u.sh.pinned && !sh_pin(v, old_smfn) )
         {
             SHADOW_ERROR("can't re-pin %#lx\n", mfn_x(old_smfn));
             domain_crash(v->domain);
@@ -4269,9 +4266,9 @@ int sh_rm_write_access_from_sl1p(struct 
 
     sp = mfn_to_shadow_page(smfn);
 
-    if ( sp->mbz != 0
-         || (sp->type != SH_type_l1_shadow
-             && sp->type != SH_type_fl1_shadow) )
+    if ( sp->count_info != 0
+         || (sp->u.sh.type != SH_type_l1_shadow
+             && sp->u.sh.type != SH_type_fl1_shadow) )
         goto fail;
 
     sl1p = sh_map_domain_page(smfn);
@@ -4410,7 +4407,7 @@ int sh_rm_mappings_from_l1(struct vcpu *
 void sh_clear_shadow_entry(struct vcpu *v, void *ep, mfn_t smfn)
 /* Blank out a single shadow entry */
 {
-    switch ( mfn_to_shadow_page(smfn)->type )
+    switch ( mfn_to_shadow_page(smfn)->u.sh.type )
     {
     case SH_type_l1_shadow:
         (void) shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
@@ -4443,7 +4440,7 @@ int sh_remove_l1_shadow(struct vcpu *v, 
              && (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
         {
             (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
-            if ( mfn_to_shadow_page(sl1mfn)->type == 0 )
+            if ( mfn_to_shadow_page(sl1mfn)->u.sh.type == 0 )
                 /* This breaks us cleanly out of the FOREACH macro */
                 done = 1;
         }
@@ -4466,7 +4463,7 @@ int sh_remove_l2_shadow(struct vcpu *v, 
              && (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
         {
             (void) shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
-            if ( mfn_to_shadow_page(sl2mfn)->type == 0 )
+            if ( mfn_to_shadow_page(sl2mfn)->u.sh.type == 0 )
                 /* This breaks us cleanly out of the FOREACH macro */
                 done = 1;
         }
@@ -4488,7 +4485,7 @@ int sh_remove_l3_shadow(struct vcpu *v, 
              && (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
         {
             (void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
-            if ( mfn_to_shadow_page(sl3mfn)->type == 0 )
+            if ( mfn_to_shadow_page(sl3mfn)->u.sh.type == 0 )
                 /* This breaks us cleanly out of the FOREACH macro */
                 done = 1;
         }
@@ -4890,7 +4887,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
     int done = 0;
     
     /* Follow the backpointer */
-    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->backpointer);
+    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->u.sh.back);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     /* Out-of-sync l1 shadows can contain anything: just check the OOS hash */
@@ -4980,7 +4977,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
     int done = 0;
 
     /* Follow the backpointer */
-    gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->backpointer);
+    gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->u.sh.back);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     /* Only L1's may be out of sync. */
@@ -5029,7 +5026,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
     int done = 0;
 
     /* Follow the backpointer */
-    gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->backpointer);
+    gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->u.sh.back);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
     /* Only L1's may be out of sync. */
@@ -5076,7 +5073,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
     int done = 0;
 
     /* Follow the backpointer */
-    gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->backpointer);
+    gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->u.sh.back);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
     /* Only L1's may be out of sync. */
--- 2009-01-30.orig/xen/arch/x86/mm/shadow/private.h    2009-01-28 08:53:49.000000000 +0100
+++ 2009-01-30/xen/arch/x86/mm/shadow/private.h 2009-01-30 10:19:16.000000000 +0100
@@ -220,60 +220,6 @@ extern void shadow_audit_tables(struct v
 #undef GUEST_LEVELS
 #endif /* CONFIG_PAGING_LEVELS == 4 */
 
-/******************************************************************************
- * Page metadata for shadow pages.
- */
-
-struct shadow_page_info
-{
-    union {
-        /* Ensures that shadow_page_info is same size as page_info. */
-        struct page_info page_info;
-
-        struct {
-            union {
-                /* When in use, guest page we're a shadow of */
-                unsigned long backpointer;
-                /* When free, order of the freelist we're on */
-                unsigned int order;
-            };
-            union {
-                /* When in use, next shadow in this hash chain */
-                struct shadow_page_info *next_shadow;
-                /* When free, TLB flush time when freed */
-                u32 tlbflush_timestamp;
-            };
-            struct {
-                unsigned long mbz;     /* Must be zero: count_info is here. */
-                unsigned long type:5;   /* What kind of shadow is this? */
-                unsigned long pinned:1; /* Is the shadow pinned? */
-                unsigned long count:26; /* Reference count */
-            } __attribute__((packed));
-            union {
-                /* For unused shadow pages, a list of pages of this order; for 
-                 * pinnable shadows, if pinned, a list of other pinned shadows
-                 * (see sh_type_is_pinnable() below for the definition of 
-                 * "pinnable" shadow types). */
-                struct list_head list;
-                /* For non-pinnable shadows, a higher entry that points
-                 * at us. */
-                paddr_t up;
-            };
-        };
-    };
-};
-
-/* The structure above *must* be no larger than a struct page_info
- * from mm.h, since we'll be using the same space in the frametable. 
- * Also, the mbz field must line up with the count_info field of normal 
- * pages, so they cannot be successfully get_page()d. */
-static inline void shadow_check_page_struct_offsets(void) {
-    BUILD_BUG_ON(sizeof (struct shadow_page_info) !=
-                 sizeof (struct page_info));
-    BUILD_BUG_ON(offsetof(struct shadow_page_info, mbz) !=
-                 offsetof(struct page_info, count_info));
-};
-
 /* Shadow type codes */
 #define SH_type_none           (0U) /* on the shadow free list */
 #define SH_type_min_shadow     (1U)
@@ -532,13 +478,13 @@ mfn_t oos_snapshot_lookup(struct vcpu *v
 // in order to make it work with our mfn type.
 #undef mfn_to_page
 #define mfn_to_page(_m) (frame_table + mfn_x(_m))
-#define mfn_to_shadow_page(_m) ((struct shadow_page_info *)mfn_to_page(_m))
+#define mfn_to_shadow_page mfn_to_page
 
 // Override page_to_mfn from asm/page.h, which was #include'd above,
 // in order to make it work with our mfn type.
 #undef page_to_mfn
 #define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
-#define shadow_page_to_mfn(_spg) (page_to_mfn((struct page_info *)_spg))
+#define shadow_page_to_mfn page_to_mfn
 
 // Override mfn_valid from asm/page.h, which was #include'd above,
 // in order to make it work with our mfn type.
@@ -679,22 +625,22 @@ static inline int sh_get_ref(struct vcpu
 
     ASSERT(mfn_valid(smfn));
 
-    x = sp->count;
+    x = sp->u.sh.count;
     nx = x + 1;
 
     if ( unlikely(nx >= 1U<<26) )
     {
-        SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRtype_info " smfn=%lx\n",
-                       sp->backpointer, mfn_x(smfn));
+        SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRpgmfn " smfn=%lx\n",
+                       sp->u.sh.back, mfn_x(smfn));
         return 0;
     }
     
     /* Guarded by the shadow lock, so no need for atomic update */
-    sp->count = nx;
+    sp->u.sh.count = nx;
 
     /* We remember the first shadow entry that points to each shadow. */
     if ( entry_pa != 0 
-         && !sh_type_is_pinnable(v, sp->type) 
+         && !sh_type_is_pinnable(v, sp->u.sh.type)
          && sp->up == 0 ) 
         sp->up = entry_pa;
     
@@ -710,26 +656,26 @@ static inline void sh_put_ref(struct vcp
     struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
 
     ASSERT(mfn_valid(smfn));
-    ASSERT(sp->mbz == 0);
+    ASSERT(sp->count_info == 0);
 
     /* If this is the entry in the up-pointer, remove it */
     if ( entry_pa != 0 
-         && !sh_type_is_pinnable(v, sp->type) 
+         && !sh_type_is_pinnable(v, sp->u.sh.type)
          && sp->up == entry_pa ) 
         sp->up = 0;
 
-    x = sp->count;
+    x = sp->u.sh.count;
     nx = x - 1;
 
     if ( unlikely(x == 0) ) 
     {
         SHADOW_ERROR("shadow ref underflow, smfn=%lx oc=%08x t=%#x\n",
-                     mfn_x(smfn), sp->count, sp->type);
+                     mfn_x(smfn), sp->u.sh.count, sp->u.sh.type);
         BUG();
     }
 
     /* Guarded by the shadow lock, so no need for atomic update */
-    sp->count = nx;
+    sp->u.sh.count = nx;
 
     if ( unlikely(nx == 0) ) 
         sh_destroy_shadow(v, smfn);
@@ -745,22 +691,22 @@ static inline int sh_pin(struct vcpu *v,
     
     ASSERT(mfn_valid(smfn));
     sp = mfn_to_shadow_page(smfn);
-    ASSERT(sh_type_is_pinnable(v, sp->type));
-    if ( sp->pinned ) 
+    ASSERT(sh_type_is_pinnable(v, sp->u.sh.type));
+    if ( sp->u.sh.pinned )
     {
         /* Already pinned: take it out of the pinned-list so it can go 
          * at the front */
-        list_del(&sp->list);
+        page_list_del(sp, &v->domain->arch.paging.shadow.pinned_shadows);
     }
     else
     {
         /* Not pinned: pin it! */
         if ( !sh_get_ref(v, smfn, 0) )
             return 0;
-        sp->pinned = 1;
+        sp->u.sh.pinned = 1;
     }
     /* Put it at the head of the list of pinned shadows */
-    list_add(&sp->list, &v->domain->arch.paging.shadow.pinned_shadows);
+    page_list_add(sp, &v->domain->arch.paging.shadow.pinned_shadows);
     return 1;
 }
 
@@ -772,11 +718,11 @@ static inline void sh_unpin(struct vcpu 
     
     ASSERT(mfn_valid(smfn));
     sp = mfn_to_shadow_page(smfn);
-    ASSERT(sh_type_is_pinnable(v, sp->type));
-    if ( sp->pinned )
+    ASSERT(sh_type_is_pinnable(v, sp->u.sh.type));
+    if ( sp->u.sh.pinned )
     {
-        sp->pinned = 0;
-        list_del(&sp->list);
+        sp->u.sh.pinned = 0;
+        page_list_del(sp, &v->domain->arch.paging.shadow.pinned_shadows);
         sp->up = 0; /* in case this stops being a pinnable type in future */
         sh_put_ref(v, smfn, 0);
     }
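
The ASSERT(sp->count_info == 0) in sh_put_ref() above is the direct
replacement for the old mbz check, and it preserves the property the
removed private.h comment described: with count_info held at zero,
shadow pages can never be successfully get_page()d. A minimal sketch of
that interaction (simplified from the real get_page() in
xen/arch/x86/mm.c, which does this with a cmpxchg loop plus additional
owner and overflow checks):

    /* Sketch only - not the actual implementation. */
    int get_page_sketch(struct page_info *page, struct domain *domain)
    {
        unsigned long x = page->count_info;

        /* Shadow pages keep count_info == 0 for their whole lifetime,
         * so (x & PGC_count_mask) == 0 and the reference is refused. */
        if ( unlikely((x & PGC_count_mask) == 0) )
            return 0;

        /* ... increment the count, verify the owner ... */
        return 1;
    }
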
--- 2009-01-30.orig/xen/include/asm-x86/mm.h    2009-01-30 10:13:53.000000000 +0100
+++ 2009-01-30/xen/include/asm-x86/mm.h 2009-01-30 10:14:47.000000000 +0100
@@ -17,19 +17,39 @@
  */
 #define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
 
+/*
+ * This definition is solely for the use in struct page_info (and
+ * struct page_list_head), intended to allow easy adjustment once x86-64
+ * wants to support more than 16Tb.
+ * 'unsigned long' should be used for MFNs everywhere else.
+ */
+#define __mfn_t unsigned int
+#define PRpgmfn "08x"
+
 #ifndef __i386__
 # undef page_list_entry
 struct page_list_entry
 {
-    unsigned int next, prev;
-    unsigned long _pad_for_sh_; /* until struct shadow_page_info gets updated */
+    __mfn_t next, prev;
 };
 #endif
 
 struct page_info
+/* Until all uses of the old type get cleaned up: */
+#define shadow_page_info page_info
 {
-    /* Each frame can be threaded onto a doubly-linked list. */
-    struct page_list_entry list;
+    union {
+        /* Each frame can be threaded onto a doubly-linked list.
+         *
+         * For unused shadow pages, a list of pages of this order; for
+         * pinnable shadows, if pinned, a list of other pinned shadows
+         * (see sh_type_is_pinnable() below for the definition of
+         * "pinnable" shadow types).
+         */
+        struct page_list_entry list;
+        /* For non-pinnable shadows, a higher entry that points at us. */
+        paddr_t up;
+    };
 
     /* Reference count and various PGC_xxx flags and fields. */
     unsigned long count_info;
@@ -45,6 +65,19 @@ struct page_info
             unsigned long type_info;
         } inuse;
 
+        /* Page is in use as a shadow: count_info == 0. */
+        struct {
+            unsigned long type:5;   /* What kind of shadow is this? */
+            unsigned long pinned:1; /* Is the shadow pinned? */
+            unsigned long count:26; /* Reference count */
+            union {
+                /* When in use, GMFN of guest page we're a shadow of. */
+                __mfn_t back;
+                /* When free, order of the freelist we're on. */
+                unsigned int order;
+            };
+        } sh;
+
         /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
         struct {
             /* Order-size of the free chunk this page is the head of. */
@@ -104,9 +137,14 @@ struct page_info
          * tracked for TLB-flush avoidance when a guest runs in shadow mode.
          */
         u32 shadow_flags;
+
+        /* When in use as a shadow, next shadow in this hash chain. */
+        struct shadow_page_info *next_shadow;
     };
 };
 
+#undef __mfn_t
+
 #define PG_shift(idx)   (BITS_PER_LONG - (idx))
 #define PG_mask(x, idx) (x ## UL << PG_shift(idx))
 
--- 2009-01-30.orig/xen/include/asm-x86/domain.h        2009-01-30 10:13:53.000000000 +0100
+++ 2009-01-30/xen/include/asm-x86/domain.h     2009-01-30 10:14:47.000000000 +0100
@@ -79,10 +79,10 @@ struct shadow_domain {
     int               locker; /* processor which holds the lock */
     const char       *locker_function; /* Func that took it */
     unsigned int      opt_flags;    /* runtime tunable optimizations on/off */
-    struct list_head  pinned_shadows;
+    struct page_list_head pinned_shadows;
 
     /* Memory allocation */
-    struct list_head  freelists[SHADOW_MAX_ORDER + 1];
+    struct page_list_head freelists[SHADOW_MAX_ORDER + 1];
     struct page_list_head p2m_freelist;
     unsigned int      total_pages;  /* number of pages allocated */
     unsigned int      free_pages;   /* number of pages on freelists */


