[Xen-changelog] [xen-unstable] x86: page_info cleanups.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1233315207 0
# Node ID 2e1734aa8db370304091e2d5271577e9a003f200
# Parent  86159a906bec41595484e75f4bd96a4a1c871c18
x86: page_info cleanups.
 1. No reason for i386 not to use the same definitions as x64
 2. No need for shadow_page_info names to hang around.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c  |   90 ++++++++++++++++++--------------------
 xen/arch/x86/mm/shadow/multi.c   |   92 +++++++++++++++++++--------------------
 xen/arch/x86/mm/shadow/private.h |   14 ++---
 xen/include/asm-x86/domain.h     |    2 
 xen/include/asm-x86/mm.h         |   12 -----
 5 files changed, 98 insertions(+), 112 deletions(-)
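
The mechanical part of the change is a plain rename: every use of the
shadow-specific aliases collapses onto the generic page_info accessors,
which is safe because shadow/private.h already #defined
mfn_to_shadow_page and shadow_page_to_mfn as mfn_to_page and page_to_mfn
(see the hunk removing those defines below). A minimal before/after
sketch, with illustrative variable names:

    /* Before: shadow-specific names, removed by this patch. */
    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
    mfn_t m = shadow_page_to_mfn(sp);

    /* After: the generic accessors, same behaviour. */
    struct page_info *pg = mfn_to_page(smfn);
    mfn_t m2 = page_to_mfn(pg);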

diff -r 86159a906bec -r 2e1734aa8db3 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Fri Jan 30 11:16:52 2009 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Fri Jan 30 11:33:27 2009 +0000
@@ -1291,7 +1291,7 @@ static inline int space_is_available(
     for ( ; order <= shadow_max_order(d); ++order )
     {
         unsigned int n = count;
-        const struct shadow_page_info *sp;
+        const struct page_info *sp;
 
         page_list_for_each ( sp, &d->arch.paging.shadow.freelists[order] )
             if ( --n == 0 )
@@ -1306,7 +1306,7 @@ static inline int space_is_available(
  * non-Xen mappings in this top-level shadow mfn */
 static void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
 {
-    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+    struct page_info *sp = mfn_to_page(smfn);
     switch ( sp->u.sh.type )
     {
     case SH_type_l2_32_shadow:
@@ -1334,7 +1334,7 @@ static inline void trace_shadow_prealloc
         /* Convert smfn to gfn */
         unsigned long gfn;
         ASSERT(mfn_valid(smfn));
-        gfn = mfn_to_gfn(d, _mfn(mfn_to_shadow_page(smfn)->v.sh.back));
+        gfn = mfn_to_gfn(d, _mfn(mfn_to_page(smfn)->v.sh.back));
         __trace_var(TRC_SHADOW_PREALLOC_UNPIN, 0/*!tsc*/,
                     sizeof(gfn), (unsigned char*)&gfn);
     }
@@ -1350,7 +1350,7 @@ static void _shadow_prealloc(
     /* Need a vcpu for calling unpins; for now, since we don't have
      * per-vcpu shadows, any will do */
     struct vcpu *v, *v2;
-    struct shadow_page_info *sp, *t;
+    struct page_info *sp, *t;
     mfn_t smfn;
     int i;
 
@@ -1366,7 +1366,7 @@ static void _shadow_prealloc(
     perfc_incr(shadow_prealloc_1);
     page_list_for_each_safe_reverse(sp, t, &d->arch.paging.shadow.pinned_shadows)
     {
-        smfn = shadow_page_to_mfn(sp);
+        smfn = page_to_mfn(sp);
 
         /* Unpin this top-level shadow */
         trace_shadow_prealloc_unpin(d, smfn);
@@ -1425,7 +1425,7 @@ void shadow_prealloc(struct domain *d, u
  * this domain's shadows */
 static void shadow_blow_tables(struct domain *d) 
 {
-    struct shadow_page_info *sp, *t;
+    struct page_info *sp, *t;
     struct vcpu *v = d->vcpu[0];
     mfn_t smfn;
     int i;
@@ -1435,7 +1435,7 @@ static void shadow_blow_tables(struct do
     /* Pass one: unpin all pinned pages */
     page_list_for_each_safe_reverse(sp, t, &d->arch.paging.shadow.pinned_shadows)
     {
-        smfn = shadow_page_to_mfn(sp);
+        smfn = page_to_mfn(sp);
         sh_unpin(v, smfn);
     }
         
@@ -1489,21 +1489,17 @@ __initcall(shadow_blow_tables_keyhandler
 __initcall(shadow_blow_tables_keyhandler_init);
 #endif /* !NDEBUG */
 
-#ifdef __i386__
-# define next_shadow(pg) ((pg)->next_shadow)
-# define set_next_shadow(pg, n) ((void)((pg)->next_shadow = (n)))
-#else
-static inline struct shadow_page_info *
-next_shadow(const struct shadow_page_info *sp)
-{
-    return sp->next_shadow ? mfn_to_shadow_page(_mfn(sp->next_shadow)) : NULL;
-}
+static inline struct page_info *
+next_shadow(const struct page_info *sp)
+{
+    return sp->next_shadow ? mfn_to_page(_mfn(sp->next_shadow)) : NULL;
+}
+
 static inline void
-set_next_shadow(struct shadow_page_info *sp, struct shadow_page_info *next)
-{
-    sp->next_shadow = next ? mfn_x(shadow_page_to_mfn(next)) : 0;
-}
-#endif
+set_next_shadow(struct page_info *sp, struct page_info *next)
+{
+    sp->next_shadow = next ? mfn_x(page_to_mfn(next)) : 0;
+}
 
 /* Allocate another shadow's worth of (contiguous, aligned) pages,
  * and fill in the type and backpointer fields of their page_infos. 
@@ -1512,7 +1508,7 @@ mfn_t shadow_alloc(struct domain *d,
                     u32 shadow_type,
                     unsigned long backpointer)
 {
-    struct shadow_page_info *sp = NULL;
+    struct page_info *sp = NULL;
     unsigned int order = shadow_order(shadow_type);
     cpumask_t mask;
     void *p;
@@ -1561,7 +1557,7 @@ mfn_t shadow_alloc(struct domain *d,
             flush_tlb_mask(mask);
         }
         /* Now safe to clear the page for reuse */
-        p = sh_map_domain_page(shadow_page_to_mfn(sp+i));
+        p = sh_map_domain_page(page_to_mfn(sp+i));
         ASSERT(p != NULL);
         clear_page(p);
         sh_unmap_domain_page(p);
@@ -1573,14 +1569,14 @@ mfn_t shadow_alloc(struct domain *d,
         set_next_shadow(&sp[i], NULL);
         perfc_incr(shadow_alloc_count);
     }
-    return shadow_page_to_mfn(sp);
+    return page_to_mfn(sp);
 }
 
 
 /* Return some shadow pages to the pool. */
 void shadow_free(struct domain *d, mfn_t smfn)
 {
-    struct shadow_page_info *sp = mfn_to_shadow_page(smfn); 
+    struct page_info *sp = mfn_to_page(smfn); 
     u32 shadow_type;
     unsigned long order;
     unsigned long mask;
@@ -1626,7 +1622,7 @@ void shadow_free(struct domain *d, mfn_t
     for ( ; order < shadow_max_order(d); ++order )
     {
         mask = 1 << order;
-        if ( (mfn_x(shadow_page_to_mfn(sp)) & mask) ) {
+        if ( (mfn_x(page_to_mfn(sp)) & mask) ) {
             /* Merge with predecessor block? */
             if ( ((sp-mask)->u.sh.type != PGT_none) ||
                  ((sp-mask)->v.free.order != order) )
@@ -1787,7 +1783,7 @@ static unsigned int sh_set_allocation(st
                                       unsigned int pages,
                                       int *preempted)
 {
-    struct shadow_page_info *sp;
+    struct page_info *sp;
     unsigned int lower_bound;
     unsigned int j, order = shadow_max_order(d);
 
@@ -1809,7 +1805,7 @@ static unsigned int sh_set_allocation(st
         if ( d->arch.paging.shadow.total_pages < pages ) 
         {
             /* Need to allocate more memory from domheap */
-            sp = (struct shadow_page_info *)
+            sp = (struct page_info *)
                 alloc_domheap_pages(NULL, order, MEMF_node(domain_to_node(d)));
             if ( sp == NULL ) 
             { 
@@ -1890,7 +1886,7 @@ static void sh_hash_audit_bucket(struct 
 static void sh_hash_audit_bucket(struct domain *d, int bucket)
 /* Audit one bucket of the hash table */
 {
-    struct shadow_page_info *sp, *x;
+    struct page_info *sp, *x;
 
     if ( !(SHADOW_AUDIT_ENABLE) )
         return;
@@ -1931,7 +1927,7 @@ static void sh_hash_audit_bucket(struct 
                         SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by 
%#"PRI_mfn")"
                                      " and not OOS but has typecount %#lx\n",
                                      sp->v.sh.back,
-                                     mfn_x(shadow_page_to_mfn(sp)), 
+                                     mfn_x(page_to_mfn(sp)), 
                                      gpg->u.inuse.type_info);
                         BUG();
                     }
@@ -1944,7 +1940,7 @@ static void sh_hash_audit_bucket(struct 
             {
                 SHADOW_ERROR("MFN %#"PRpgmfn" shadowed (by %#"PRI_mfn")"
                              " but has typecount %#lx\n",
-                             sp->v.sh.back, mfn_x(shadow_page_to_mfn(sp)),
+                             sp->v.sh.back, mfn_x(page_to_mfn(sp)),
                              gpg->u.inuse.type_info);
                 BUG();
             }
@@ -1983,15 +1979,15 @@ static void sh_hash_audit(struct domain 
  * Returns 0 for success, 1 for error. */
 static int shadow_hash_alloc(struct domain *d)
 {
-    struct shadow_page_info **table;
+    struct page_info **table;
 
     ASSERT(shadow_locked_by_me(d));
     ASSERT(!d->arch.paging.shadow.hash_table);
 
-    table = xmalloc_array(struct shadow_page_info *, SHADOW_HASH_BUCKETS);
+    table = xmalloc_array(struct page_info *, SHADOW_HASH_BUCKETS);
     if ( !table ) return 1;
     memset(table, 0, 
-           SHADOW_HASH_BUCKETS * sizeof (struct shadow_page_info *));
+           SHADOW_HASH_BUCKETS * sizeof (struct page_info *));
     d->arch.paging.shadow.hash_table = table;
     return 0;
 }
@@ -2013,7 +2009,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
  * or INVALID_MFN if it doesn't exist */
 {
     struct domain *d = v->domain;
-    struct shadow_page_info *sp, *prev;
+    struct page_info *sp, *prev;
     key_t key;
 
     ASSERT(shadow_locked_by_me(d));
@@ -2037,7 +2033,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
             {
                 if ( unlikely(d->arch.paging.shadow.hash_walking != 0) )
                     /* Can't reorder: someone is walking the hash chains */
-                    return shadow_page_to_mfn(sp);
+                    return page_to_mfn(sp);
                 else 
                 {
                     ASSERT(prev);
@@ -2052,7 +2048,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
             {
                 perfc_incr(shadow_hash_lookup_head);
             }
-            return shadow_page_to_mfn(sp);
+            return page_to_mfn(sp);
         }
         prev = sp;
         sp = next_shadow(sp);
@@ -2067,7 +2063,7 @@ void shadow_hash_insert(struct vcpu *v, 
 /* Put a mapping (n,t)->smfn into the hash table */
 {
     struct domain *d = v->domain;
-    struct shadow_page_info *sp;
+    struct page_info *sp;
     key_t key;
     
     ASSERT(shadow_locked_by_me(d));
@@ -2081,7 +2077,7 @@ void shadow_hash_insert(struct vcpu *v, 
     sh_hash_audit_bucket(d, key);
     
     /* Insert this shadow at the top of the bucket */
-    sp = mfn_to_shadow_page(smfn);
+    sp = mfn_to_page(smfn);
     set_next_shadow(sp, d->arch.paging.shadow.hash_table[key]);
     d->arch.paging.shadow.hash_table[key] = sp;
     
@@ -2093,7 +2089,7 @@ void shadow_hash_delete(struct vcpu *v, 
 /* Excise the mapping (n,t)->smfn from the hash table */
 {
     struct domain *d = v->domain;
-    struct shadow_page_info *sp, *x;
+    struct page_info *sp, *x;
     key_t key;
 
     ASSERT(shadow_locked_by_me(d));
@@ -2106,7 +2102,7 @@ void shadow_hash_delete(struct vcpu *v, 
     key = sh_hash(n, t);
     sh_hash_audit_bucket(d, key);
     
-    sp = mfn_to_shadow_page(smfn);
+    sp = mfn_to_page(smfn);
     if ( d->arch.paging.shadow.hash_table[key] == sp ) 
         /* Easy case: we're deleting the head item. */
         d->arch.paging.shadow.hash_table[key] = next_shadow(sp);
@@ -2148,7 +2144,7 @@ static void hash_foreach(struct vcpu *v,
 {
     int i, done = 0;
     struct domain *d = v->domain;
-    struct shadow_page_info *x;
+    struct page_info *x;
 
     /* Say we're here, to stop hash-lookups reordering the chains */
     ASSERT(shadow_locked_by_me(d));
@@ -2166,7 +2162,7 @@ static void hash_foreach(struct vcpu *v,
             {
                 ASSERT(x->u.sh.type <= 15);
                 ASSERT(callbacks[x->u.sh.type] != NULL);
-                done = callbacks[x->u.sh.type](v, shadow_page_to_mfn(x),
+                done = callbacks[x->u.sh.type](v, page_to_mfn(x),
                                                callback_mfn);
                 if ( done ) break;
             }
@@ -2184,7 +2180,7 @@ static void hash_foreach(struct vcpu *v,
 
 void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
 {
-    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+    struct page_info *sp = mfn_to_page(smfn);
     unsigned int t = sp->u.sh.type;
 
 
@@ -2449,7 +2445,7 @@ int sh_remove_write_access(struct vcpu *
     {
         unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask);
         mfn_t last_smfn = _mfn(v->arch.paging.shadow.last_writeable_pte_smfn);
-        int shtype = mfn_to_shadow_page(last_smfn)->u.sh.type;
+        int shtype = mfn_to_page(last_smfn)->u.sh.type;
 
         if ( callbacks[shtype] ) 
             callbacks[shtype](v, last_smfn, gmfn);
@@ -2492,7 +2488,7 @@ int sh_remove_write_access_from_sl1p(str
 int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
                                      mfn_t smfn, unsigned long off)
 {
-    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+    struct page_info *sp = mfn_to_page(smfn);
     
     ASSERT(mfn_valid(smfn));
     ASSERT(mfn_valid(gmfn));
@@ -2612,7 +2608,7 @@ static int sh_remove_shadow_via_pointer(
 /* Follow this shadow's up-pointer, if it has one, and remove the reference
  * found there.  Returns 1 if that was the only reference to this shadow */
 {
-    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+    struct page_info *sp = mfn_to_page(smfn);
     mfn_t pmfn;
     void *vaddr;
     int rc;
diff -r 86159a906bec -r 2e1734aa8db3 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Fri Jan 30 11:16:52 2009 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Fri Jan 30 11:33:27 2009 +0000
@@ -973,7 +973,7 @@ static int shadow_set_l2e(struct vcpu *v
         }
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
         {
-            struct shadow_page_info *sp = mfn_to_shadow_page(sl1mfn);
+            struct page_info *sp = mfn_to_page(sl1mfn);
             mfn_t gl1mfn = _mfn(sp->v.sh.back);
 
             /* If the shadow is a fl1 then the backpointer contains
@@ -1194,8 +1194,8 @@ do {                                    
 do {                                                                    \
     int _i;                                                             \
     shadow_l1e_t *_sp = sh_map_domain_page((_sl1mfn));                  \
-    ASSERT(mfn_to_shadow_page(_sl1mfn)->u.sh.type == SH_type_l1_shadow  \
-           || mfn_to_shadow_page(_sl1mfn)->u.sh.type == SH_type_fl1_shadow);\
+    ASSERT(mfn_to_page(_sl1mfn)->u.sh.type == SH_type_l1_shadow  \
+           || mfn_to_page(_sl1mfn)->u.sh.type == SH_type_fl1_shadow);\
     for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ )              \
     {                                                                   \
         (_sl1e) = _sp + _i;                                             \
@@ -1232,7 +1232,7 @@ do {                                    
 do {                                                                      \
     int _i, _j, __done = 0;                                               \
     int _xen = !shadow_mode_external(_dom);                               \
-    ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow);\
+    ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow);\
     for ( _j = 0; _j < 4 && !__done; _j++ )                               \
     {                                                                     \
         shadow_l2e_t *_sp = sh_map_domain_page(_sl2mfn);                  \
@@ -1260,11 +1260,11 @@ do {                                    
     int _i;                                                                \
     int _xen = !shadow_mode_external(_dom);                                \
     shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                     \
-    ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow \
-           || mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow);\
+    ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow \
+           || mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow);\
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
         if ( (!(_xen))                                                     \
-             || mfn_to_shadow_page(_sl2mfn)->u.sh.type != SH_type_l2h_pae_shadow\
+             || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_pae_shadow\
              || ((_i + (3 * SHADOW_L2_PAGETABLE_ENTRIES))                  \
                  < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) ) \
         {                                                                  \
@@ -1285,13 +1285,13 @@ do {                                    
     int _i;                                                                 \
     int _xen = !shadow_mode_external(_dom);                                 \
     shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                      \
-    ASSERT(mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2_64_shadow ||\
-           mfn_to_shadow_page(_sl2mfn)->u.sh.type == SH_type_l2h_64_shadow);\
+    ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_64_shadow ||\
+           mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_64_shadow);\
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
     {                                                                       \
         if ( (!(_xen))                                                      \
              || !is_pv_32on64_domain(_dom)                                  \
-             || mfn_to_shadow_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow\
+             || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_64_shadow\
              || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
         {                                                                   \
             (_sl2e) = _sp + _i;                                             \
@@ -1313,7 +1313,7 @@ do {                                    
 do {                                                                    \
     int _i;                                                             \
     shadow_l3e_t *_sp = sh_map_domain_page((_sl3mfn));                  \
-    ASSERT(mfn_to_shadow_page(_sl3mfn)->u.sh.type == SH_type_l3_64_shadow);\
+    ASSERT(mfn_to_page(_sl3mfn)->u.sh.type == SH_type_l3_64_shadow);\
     for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ )              \
     {                                                                   \
         (_sl3e) = _sp + _i;                                             \
@@ -1331,7 +1331,7 @@ do {                                    
     shadow_l4e_t *_sp = sh_map_domain_page((_sl4mfn));                  \
     int _xen = !shadow_mode_external(_dom);                             \
     int _i;                                                             \
-    ASSERT(mfn_to_shadow_page(_sl4mfn)->u.sh.type == SH_type_l4_64_shadow);\
+    ASSERT(mfn_to_page(_sl4mfn)->u.sh.type == SH_type_l4_64_shadow);\
     for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ )              \
     {                                                                   \
         if ( (!(_xen)) || is_guest_l4_slot(_dom, _i) )                  \
@@ -1506,7 +1506,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
          && shadow_type != SH_type_l2h_pae_shadow 
          && shadow_type != SH_type_l4_64_shadow )
         /* Lower-level shadow, not yet linked form a higher level */
-        mfn_to_shadow_page(smfn)->up = 0;
+        mfn_to_page(smfn)->up = 0;
 
 #if GUEST_PAGING_LEVELS == 4
 #if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL) 
@@ -1519,7 +1519,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
          * of them, decide that this isn't an old linux guest, and stop
          * pinning l3es.  This is not very quick but it doesn't happen
          * very often. */
-        struct shadow_page_info *sp, *t;
+        struct page_info *sp, *t;
         struct vcpu *v2;
         int l4count = 0, vcpus = 0;
         page_list_for_each(sp, &v->domain->arch.paging.shadow.pinned_shadows)
@@ -1535,7 +1535,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
             page_list_for_each_safe(sp, t, &v->domain->arch.paging.shadow.pinned_shadows)
             {
                 if ( sp->u.sh.type == SH_type_l3_64_shadow )
-                    sh_unpin(v, shadow_page_to_mfn(sp));
+                    sh_unpin(v, page_to_mfn(sp));
             }
             v->domain->arch.paging.shadow.opt_flags &= ~SHOPT_LINUX_L3_TOPLEVEL;
         }
@@ -1918,7 +1918,7 @@ void sh_destroy_l4_shadow(struct vcpu *v
 void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
 {
     shadow_l4e_t *sl4e;
-    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
+    u32 t = mfn_to_page(smfn)->u.sh.type;
     mfn_t gmfn, sl4mfn;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
@@ -1926,7 +1926,7 @@ void sh_destroy_l4_shadow(struct vcpu *v
     ASSERT(t == SH_type_l4_shadow);
 
     /* Record that the guest page isn't shadowed any more (in this type) */
-    gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
+    gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
     /* Decrement refcounts of all the old entries */
@@ -1947,7 +1947,7 @@ void sh_destroy_l3_shadow(struct vcpu *v
 void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
 {
     shadow_l3e_t *sl3e;
-    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
+    u32 t = mfn_to_page(smfn)->u.sh.type;
     mfn_t gmfn, sl3mfn;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
@@ -1955,7 +1955,7 @@ void sh_destroy_l3_shadow(struct vcpu *v
     ASSERT(t == SH_type_l3_shadow);
 
     /* Record that the guest page isn't shadowed any more (in this type) */
-    gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
+    gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
 
@@ -1977,7 +1977,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
 void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
 {
     shadow_l2e_t *sl2e;
-    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
+    u32 t = mfn_to_page(smfn)->u.sh.type;
     mfn_t gmfn, sl2mfn;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
@@ -1990,7 +1990,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
 #endif
 
     /* Record that the guest page isn't shadowed any more (in this type) */
-    gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
+    gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
     delete_shadow_status(v, gmfn, t, smfn);
     shadow_demote(v, gmfn, t);
 
@@ -2011,7 +2011,7 @@ void sh_destroy_l1_shadow(struct vcpu *v
 {
     struct domain *d = v->domain;
     shadow_l1e_t *sl1e;
-    u32 t = mfn_to_shadow_page(smfn)->u.sh.type;
+    u32 t = mfn_to_page(smfn)->u.sh.type;
 
     SHADOW_DEBUG(DESTROY_SHADOW,
                   "%s(%05lx)\n", __func__, mfn_x(smfn));
@@ -2020,12 +2020,12 @@ void sh_destroy_l1_shadow(struct vcpu *v
     /* Record that the guest page isn't shadowed any more (in this type) */
     if ( t == SH_type_fl1_shadow )
     {
-        gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->v.sh.back);
+        gfn_t gfn = _gfn(mfn_to_page(smfn)->v.sh.back);
         delete_fl1_shadow_status(v, gfn, smfn);
     }
     else 
     {
-        mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->v.sh.back);
+        mfn_t gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
         delete_shadow_status(v, gmfn, t, smfn);
         shadow_demote(v, gmfn, t);
     }
@@ -2051,7 +2051,7 @@ void sh_destroy_monitor_table(struct vcp
 void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
 {
     struct domain *d = v->domain;
-    ASSERT(mfn_to_shadow_page(mmfn)->u.sh.type == SH_type_monitor_table);
+    ASSERT(mfn_to_page(mmfn)->u.sh.type == SH_type_monitor_table);
 
 #if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS != 4)
     {
@@ -2295,7 +2295,7 @@ static int validate_gl2e(struct vcpu *v,
 
 #if SHADOW_PAGING_LEVELS == 3
         reserved_xen_slot = 
-            ((mfn_to_shadow_page(sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow) &&
+            ((mfn_to_page(sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow) &&
              (shadow_index 
               >= (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1))));
 #else /* SHADOW_PAGING_LEVELS == 2 */
@@ -2349,7 +2349,7 @@ static int validate_gl1e(struct vcpu *v,
     result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
-    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
+    gl1mfn = _mfn(mfn_to_page(sl1mfn)->v.sh.back);
     if ( mfn_valid(gl1mfn) 
          && mfn_is_out_of_sync(gl1mfn) )
     {
@@ -2426,14 +2426,14 @@ void sh_resync_l1(struct vcpu *v, mfn_t 
  *      called in the *mode* of the vcpu that unsynced it.  Clear?  Good. */
 int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
 {
-    struct shadow_page_info *sp;
+    struct page_info *sp;
     mfn_t smfn;
 
     smfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
     ASSERT(mfn_valid(smfn)); /* Otherwise we would not have been called */
     
     /* Up to l2 */
-    sp = mfn_to_shadow_page(smfn);
+    sp = mfn_to_page(smfn);
     if ( sp->u.sh.count != 1 || !sp->up )
         return 0;
     smfn = _mfn(sp->up >> PAGE_SHIFT);
@@ -2441,14 +2441,14 @@ int sh_safe_not_to_sync(struct vcpu *v, 
 
 #if (SHADOW_PAGING_LEVELS == 4) 
     /* up to l3 */
-    sp = mfn_to_shadow_page(smfn);
+    sp = mfn_to_page(smfn);
     if ( sp->u.sh.count != 1 || !sp->up )
         return 0;
     smfn = _mfn(sp->up >> PAGE_SHIFT);
     ASSERT(mfn_valid(smfn));
 
     /* up to l4 */
-    sp = mfn_to_shadow_page(smfn);
+    sp = mfn_to_page(smfn);
     if ( sp->u.sh.count != 1
          || sh_type_is_pinnable(v, SH_type_l3_64_shadow) || !sp->up )
         return 0;
@@ -2967,7 +2967,7 @@ static int sh_page_fault(struct vcpu *v,
                                         + shadow_l2_linear_offset(va)),
                                        sizeof(sl2e)) != 0)
                      || !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT)
-                     || !mfn_valid(gl1mfn = _mfn(mfn_to_shadow_page(
+                     || !mfn_valid(gl1mfn = _mfn(mfn_to_page(
                                       shadow_l2e_get_mfn(sl2e))->v.sh.back))
                      || unlikely(mfn_is_out_of_sync(gl1mfn)) )
                {
@@ -3520,7 +3520,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
     // easier than invalidating all of the individual 4K pages).
     //
     sl1mfn = shadow_l2e_get_mfn(sl2e);
-    if ( mfn_to_shadow_page(sl1mfn)->u.sh.type
+    if ( mfn_to_page(sl1mfn)->u.sh.type
          == SH_type_fl1_shadow )
     {
         flush_tlb_local();
@@ -3530,7 +3530,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
     /* Check to see if the SL1 is out of sync. */
     {
-        mfn_t gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
+        mfn_t gl1mfn = _mfn(mfn_to_page(sl1mfn)->v.sh.back);
         struct page_info *pg = mfn_to_page(gl1mfn);
         if ( mfn_valid(gl1mfn) 
              && page_is_out_of_sync(pg) )
@@ -3560,7 +3560,7 @@ sh_invlpg(struct vcpu *v, unsigned long 
             }
 
             sl1mfn = shadow_l2e_get_mfn(sl2e);
-            gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
+            gl1mfn = _mfn(mfn_to_page(sl1mfn)->v.sh.back);
             pg = mfn_to_page(gl1mfn);
             
             if ( likely(sh_mfn_is_a_page_table(gl1mfn)
@@ -3965,7 +3965,7 @@ sh_set_toplevel_shadow(struct vcpu *v,
         /* Need to repin the old toplevel shadow if it's been unpinned
          * by shadow_prealloc(): in PV mode we're still running on this
          * shadow and it's not safe to free it yet. */
-        if ( !mfn_to_shadow_page(old_smfn)->u.sh.pinned && !sh_pin(v, old_smfn) )
+        if ( !mfn_to_page(old_smfn)->u.sh.pinned && !sh_pin(v, old_smfn) )
         {
             SHADOW_ERROR("can't re-pin %#lx\n", mfn_x(old_smfn));
             domain_crash(v->domain);
@@ -4259,12 +4259,12 @@ int sh_rm_write_access_from_sl1p(struct 
 {
     int r;
     shadow_l1e_t *sl1p, sl1e;
-    struct shadow_page_info *sp;
+    struct page_info *sp;
 
     ASSERT(mfn_valid(gmfn));
     ASSERT(mfn_valid(smfn));
 
-    sp = mfn_to_shadow_page(smfn);
+    sp = mfn_to_page(smfn);
 
     if ( sp->count_info != 0
          || (sp->u.sh.type != SH_type_l1_shadow
@@ -4407,7 +4407,7 @@ void sh_clear_shadow_entry(struct vcpu *
 void sh_clear_shadow_entry(struct vcpu *v, void *ep, mfn_t smfn)
 /* Blank out a single shadow entry */
 {
-    switch ( mfn_to_shadow_page(smfn)->u.sh.type )
+    switch ( mfn_to_page(smfn)->u.sh.type )
     {
     case SH_type_l1_shadow:
         (void) shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
@@ -4440,7 +4440,7 @@ int sh_remove_l1_shadow(struct vcpu *v, 
              && (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
         {
             (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
-            if ( mfn_to_shadow_page(sl1mfn)->u.sh.type == 0 )
+            if ( mfn_to_page(sl1mfn)->u.sh.type == 0 )
                 /* This breaks us cleanly out of the FOREACH macro */
                 done = 1;
         }
@@ -4463,7 +4463,7 @@ int sh_remove_l2_shadow(struct vcpu *v, 
              && (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
         {
             (void) shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
-            if ( mfn_to_shadow_page(sl2mfn)->u.sh.type == 0 )
+            if ( mfn_to_page(sl2mfn)->u.sh.type == 0 )
                 /* This breaks us cleanly out of the FOREACH macro */
                 done = 1;
         }
@@ -4485,7 +4485,7 @@ int sh_remove_l3_shadow(struct vcpu *v, 
              && (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
         {
             (void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
-            if ( mfn_to_shadow_page(sl3mfn)->u.sh.type == 0 )
+            if ( mfn_to_page(sl3mfn)->u.sh.type == 0 )
                 /* This breaks us cleanly out of the FOREACH macro */
                 done = 1;
         }
@@ -4887,7 +4887,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
     int done = 0;
     
     /* Follow the backpointer */
-    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->v.sh.back);
+    gl1mfn = _mfn(mfn_to_page(sl1mfn)->v.sh.back);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     /* Out-of-sync l1 shadows can contain anything: just check the OOS hash */
@@ -4977,7 +4977,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
     int done = 0;
 
     /* Follow the backpointer */
-    gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->v.sh.back);
+    gl2mfn = _mfn(mfn_to_page(sl2mfn)->v.sh.back);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     /* Only L1's may be out of sync. */
@@ -5026,7 +5026,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
     int done = 0;
 
     /* Follow the backpointer */
-    gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->v.sh.back);
+    gl3mfn = _mfn(mfn_to_page(sl3mfn)->v.sh.back);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
     /* Only L1's may be out of sync. */
@@ -5073,7 +5073,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
     int done = 0;
 
     /* Follow the backpointer */
-    gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->v.sh.back);
+    gl4mfn = _mfn(mfn_to_page(sl4mfn)->v.sh.back);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
     /* Only L1's may be out of sync. */
diff -r 86159a906bec -r 2e1734aa8db3 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Fri Jan 30 11:16:52 2009 +0000
+++ b/xen/arch/x86/mm/shadow/private.h  Fri Jan 30 11:33:27 2009 +0000
@@ -478,13 +478,11 @@ mfn_t oos_snapshot_lookup(struct vcpu *v
 // in order to make it work with our mfn type.
 #undef mfn_to_page
 #define mfn_to_page(_m) (frame_table + mfn_x(_m))
-#define mfn_to_shadow_page mfn_to_page
 
 // Override page_to_mfn from asm/page.h, which was #include'd above,
 // in order to make it work with our mfn type.
 #undef page_to_mfn
 #define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
-#define shadow_page_to_mfn page_to_mfn
 
 // Override mfn_valid from asm/page.h, which was #include'd above,
 // in order to make it work with our mfn type.
@@ -621,7 +619,7 @@ static inline int sh_get_ref(struct vcpu
 static inline int sh_get_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
 {
     u32 x, nx;
-    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+    struct page_info *sp = mfn_to_page(smfn);
 
     ASSERT(mfn_valid(smfn));
 
@@ -653,7 +651,7 @@ static inline void sh_put_ref(struct vcp
 static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
 {
     u32 x, nx;
-    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+    struct page_info *sp = mfn_to_page(smfn);
 
     ASSERT(mfn_valid(smfn));
     ASSERT(sp->count_info == 0);
@@ -687,10 +685,10 @@ static inline void sh_put_ref(struct vcp
  * Returns 0 for failure, 1 for success. */
 static inline int sh_pin(struct vcpu *v, mfn_t smfn)
 {
-    struct shadow_page_info *sp;
+    struct page_info *sp;
     
     ASSERT(mfn_valid(smfn));
-    sp = mfn_to_shadow_page(smfn);
+    sp = mfn_to_page(smfn);
     ASSERT(sh_type_is_pinnable(v, sp->u.sh.type));
     if ( sp->u.sh.pinned )
     {
@@ -714,10 +712,10 @@ static inline int sh_pin(struct vcpu *v,
  * of pinned shadows, and release the extra ref. */
 static inline void sh_unpin(struct vcpu *v, mfn_t smfn)
 {
-    struct shadow_page_info *sp;
+    struct page_info *sp;
     
     ASSERT(mfn_valid(smfn));
-    sp = mfn_to_shadow_page(smfn);
+    sp = mfn_to_page(smfn);
     ASSERT(sh_type_is_pinnable(v, sp->u.sh.type));
     if ( sp->u.sh.pinned )
     {
diff -r 86159a906bec -r 2e1734aa8db3 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Fri Jan 30 11:16:52 2009 +0000
+++ b/xen/include/asm-x86/domain.h      Fri Jan 30 11:33:27 2009 +0000
@@ -92,7 +92,7 @@ struct shadow_domain {
     pagetable_t unpaged_pagetable;
 
     /* Shadow hashtable */
-    struct shadow_page_info **hash_table;
+    struct page_info **hash_table;
     int hash_walking;  /* Some function is walking the hash table */
 
     /* Fast MMIO path heuristic */
diff -r 86159a906bec -r 2e1734aa8db3 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Fri Jan 30 11:16:52 2009 +0000
+++ b/xen/include/asm-x86/mm.h  Fri Jan 30 11:33:27 2009 +0000
@@ -20,23 +20,19 @@
 /*
  * This definition is solely for the use in struct page_info (and
  * struct page_list_head), intended to allow easy adjustment once x86-64
- * wants to support more than 16Tb.
+ * wants to support more than 16TB.
  * 'unsigned long' should be used for MFNs everywhere else.
  */
 #define __mfn_t unsigned int
 #define PRpgmfn "08x"
 
-#ifndef __i386__
-# undef page_list_entry
+#undef page_list_entry
 struct page_list_entry
 {
     __mfn_t next, prev;
 };
-#endif
 
 struct page_info
-/* Until all uses of the old type get cleaned up: */
-#define shadow_page_info page_info
 {
     union {
         /* Each frame can be threaded onto a doubly-linked list.
@@ -151,11 +147,7 @@ struct page_info
         u32 shadow_flags;
 
         /* When in use as a shadow, next shadow in this hash chain. */
-#ifdef __i386__
-        struct shadow_page_info *next_shadow;
-#else
         __mfn_t next_shadow;
-#endif
     };
 };
 

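Beyond the rename, point 1 of the cleanup shows up in the mm.h hunk
above, which drops the __i386__ special cases: i386 now shares both the
compact __mfn_t page_list_entry and the MFN-encoded next_shadow field
that x86-64 already used (previously i386 chained shadows with a full
struct shadow_page_info * pointer). In the shared encoding, MFN 0
doubles as the end-of-chain marker, as the next_shadow()/set_next_shadow()
helpers in the common.c hunk show. A short sketch of walking one hash
bucket with the unified helpers, assuming a valid bucket index key:

    struct page_info *sp = d->arch.paging.shadow.hash_table[key];
    while ( sp != NULL )
    {
        /* ... inspect sp->u.sh.type and the backpointer sp->v.sh.back ... */
        sp = next_shadow(sp);  /* returns NULL once sp->next_shadow == 0 */
    }
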