
[Xen-changelog] Remove direct references to frame_table array. Use pfn_to_page (or page_to_pfn) instead.
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 3d1c7be170a7b27adc58309961ce7d476984874b
# Parent  1283d309a6034aa99b8347e1500f2f47e0b65e6a
Remove direct references to frame_table array. Use
pfn_to_page (or page_to_pfn) instead.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
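
For reference, here is a minimal sketch of the accessor pattern this
changeset adopts. The definitions below are assumptions for illustration;
the real macros live in the arch mm headers (e.g. xen/include/asm-x86/mm.h)
and wrap the same global frame_table array:

    /* Sketch only -- assumed definitions, not the authoritative ones. */
    extern struct pfn_info *frame_table;

    /* Map a machine frame number to its struct pfn_info, and back. */
    #define pfn_to_page(_pfn)   (frame_table + (_pfn))
    #define page_to_pfn(_page)  ((unsigned long)((_page) - frame_table))

Call sites then write pfn_to_page(mfn) instead of &frame_table[mfn], and
page_to_pfn(page) instead of (page - frame_table). Hiding the indexing
behind the accessors means the pfn<->page mapping can later change (for
example, to a sparse or virtually mapped frame table) without touching
every caller.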

diff -r 1283d309a603 -r 3d1c7be170a7 xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c      Sun Dec 18 19:29:43 2005
+++ b/xen/arch/x86/audit.c      Tue Dec 20 11:46:56 2005
@@ -61,7 +61,7 @@
 #ifdef __i386__
 #ifdef CONFIG_X86_PAE
         /* 32b PAE */
-        if ( (( frame_table[mfn].u.inuse.type_info & PGT_va_mask ) 
+        if ( (( pfn_to_page(mfn)->u.inuse.type_info & PGT_va_mask ) 
            >> PGT_va_shift) == 3 )
             return l2_table_offset(HYPERVISOR_VIRT_START); 
         else
@@ -364,7 +364,7 @@
             {
                 gmfn = __gpfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
                 smfn = a->smfn;
-                page = &frame_table[smfn];
+                page = pfn_to_page(smfn);
 
                 switch ( a->gpfn_and_flags & PGT_type_mask ) {
                 case PGT_writable_pred:
@@ -433,11 +433,13 @@
         for_each_vcpu(d, v)
         {
             if ( pagetable_get_paddr(v->arch.guest_table) )
-                adjust(&frame_table[pagetable_get_pfn(v->arch.guest_table)], !shadow_mode_refcounts(d));
+                adjust(pfn_to_page(pagetable_get_pfn(v->arch.guest_table)),
+                       !shadow_mode_refcounts(d));
             if ( pagetable_get_paddr(v->arch.shadow_table) )
-                adjust(&frame_table[pagetable_get_pfn(v->arch.shadow_table)], 0);
+                adjust(pfn_to_page(pagetable_get_pfn(v->arch.shadow_table)),
+                       0);
             if ( v->arch.monitor_shadow_ref )
-                adjust(&frame_table[v->arch.monitor_shadow_ref], 0);
+                adjust(pfn_to_page(v->arch.monitor_shadow_ref), 0);
         }
     }
 
@@ -617,7 +619,7 @@
     void scan_for_pfn_in_mfn(struct domain *d, unsigned long xmfn,
                              unsigned long mfn)
     {
-        struct pfn_info *page = &frame_table[mfn];
+        struct pfn_info *page = pfn_to_page(mfn);
         l1_pgentry_t *pt = map_domain_page(mfn);
         int i;
 
diff -r 1283d309a603 -r 3d1c7be170a7 xen/arch/x86/dom0_ops.c
--- a/xen/arch/x86/dom0_ops.c   Sun Dec 18 19:29:43 2005
+++ b/xen/arch/x86/dom0_ops.c   Tue Dec 20 11:46:56 2005
@@ -210,7 +210,7 @@
              unlikely((d = find_domain_by_id(dom)) == NULL) )
             break;
 
-        page = &frame_table[pfn];
+        page = pfn_to_page(pfn);
 
         if ( likely(get_page(page, d)) )
         {
@@ -285,7 +285,7 @@
                 struct pfn_info *page;
                 unsigned long mfn = l_arr[j];
 
-                page = &frame_table[mfn];
+                page = pfn_to_page(mfn);
 
                 if ( likely(pfn_valid(mfn) && get_page(page, d)) ) 
                 {
@@ -350,15 +350,14 @@
             list_ent = d->page_list.next;
             for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
             {
-                pfn = list_entry(list_ent, struct pfn_info, list) - 
-                    frame_table;
+                pfn = page_to_pfn(list_entry(list_ent, struct pfn_info, list));
                 if ( put_user(pfn, buffer) )
                 {
                     ret = -EFAULT;
                     break;
                 }
                 buffer++;
-                list_ent = frame_table[pfn].list.next;
+                list_ent = pfn_to_page(pfn)->list.next;
             }
             spin_unlock(&d->page_alloc_lock);
 
diff -r 1283d309a603 -r 3d1c7be170a7 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Sun Dec 18 19:29:43 2005
+++ b/xen/arch/x86/domain.c     Tue Dec 20 11:46:56 2005
@@ -190,7 +190,7 @@
         list_for_each_entry ( page, &d->page_list, list )
         {
             printk("Page %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
-                   _p(page_to_phys(page)), _p(page - frame_table),
+                   _p(page_to_phys(page)), _p(page_to_pfn(page)),
                    page->count_info, page->u.inuse.type_info);
         }
     }
@@ -198,13 +198,13 @@
     list_for_each_entry ( page, &d->xenpage_list, list )
     {
         printk("XenPage %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
-               _p(page_to_phys(page)), _p(page - frame_table),
+               _p(page_to_phys(page)), _p(page_to_pfn(page)),
                page->count_info, page->u.inuse.type_info);
     }
 
     page = virt_to_page(d->shared_info);
     printk("Shared_info@%p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
-           _p(page_to_phys(page)), _p(page - frame_table), page->count_info,
+           _p(page_to_phys(page)), _p(page_to_pfn(page)), page->count_info,
            page->u.inuse.type_info);
 }
 
@@ -391,19 +391,19 @@
 
     if ( shadow_mode_refcounts(d) )
     {
-        if ( !get_page(&frame_table[phys_basetab>>PAGE_SHIFT], d) )
+        if ( !get_page(pfn_to_page(phys_basetab>>PAGE_SHIFT), d) )
             return -EINVAL;
     }
     else if ( !(c->flags & VGCF_VMX_GUEST) )
     {
-        if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d,
+        if ( !get_page_and_type(pfn_to_page(phys_basetab>>PAGE_SHIFT), d,
                                 PGT_base_page_table) )
             return -EINVAL;
     }
 
     if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
     {
-        put_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT]);
+        put_page_and_type(pfn_to_page(phys_basetab>>PAGE_SHIFT));
         return rc;
     }
 
diff -r 1283d309a603 -r 3d1c7be170a7 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Sun Dec 18 19:29:43 2005
+++ b/xen/arch/x86/domain_build.c       Tue Dec 20 11:46:56 2005
@@ -405,7 +405,7 @@
         *l1tab = l1e_from_pfn(mfn, L1_PROT);
         l1tab++;
         
-        page = &frame_table[mfn];
+        page = pfn_to_page(mfn);
         if ( !get_page_and_type(page, d, PGT_writable_page) )
             BUG();
 
@@ -418,7 +418,7 @@
     l1tab += l1_table_offset(vpt_start);
     for ( count = 0; count < nr_pt_pages; count++ ) 
     {
-        page = &frame_table[l1e_get_pfn(*l1tab)];
+        page = pfn_to_page(l1e_get_pfn(*l1tab));
         if ( !opt_dom0_shadow )
             l1e_remove_flags(*l1tab, _PAGE_RW);
         else
@@ -548,7 +548,7 @@
         *l1tab = l1e_from_pfn(mfn, L1_PROT);
         l1tab++;
 
-        page = &frame_table[mfn];
+        page = pfn_to_page(mfn);
         if ( (page->u.inuse.type_info == 0) &&
              !get_page_and_type(page, d, PGT_writable_page) )
             BUG();
@@ -567,7 +567,7 @@
     for ( count = 0; count < nr_pt_pages; count++ ) 
     {
         l1e_remove_flags(*l1tab, _PAGE_RW);
-        page = &frame_table[l1e_get_pfn(*l1tab)];
+        page = pfn_to_page(l1e_get_pfn(*l1tab));
 
         /* Read-only mapping + PGC_allocated + page-table page. */
         page->count_info         = PGC_allocated | 3;
diff -r 1283d309a603 -r 3d1c7be170a7 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Sun Dec 18 19:29:43 2005
+++ b/xen/arch/x86/mm.c Tue Dec 20 11:46:56 2005
@@ -202,7 +202,7 @@
     /* First 1MB of RAM is historically marked as I/O. */
     for ( i = 0; i < 0x100; i++ )
     {
-        page = &frame_table[i];
+        page = pfn_to_page(i);
         page->count_info        = PGC_allocated | 1;
         page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
         page_set_owner(page, dom_io);
@@ -216,10 +216,10 @@
         /* Every page from cursor to start of next RAM region is I/O. */
         rstart_pfn = PFN_UP(e820.map[i].addr);
         rend_pfn   = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-        while ( pfn < rstart_pfn )
+        for ( ; pfn < rstart_pfn; pfn++ )
         {
             BUG_ON(!pfn_valid(pfn));
-            page = &frame_table[pfn++];
+            page = pfn_to_page(pfn);
             page->count_info        = PGC_allocated | 1;
             page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
             page_set_owner(page, dom_io);
@@ -253,7 +253,7 @@
         pfn = l1e_get_pfn(v->arch.perdomain_ptes[i]);
         if ( pfn == 0 ) continue;
         v->arch.perdomain_ptes[i] = l1e_empty();
-        page = &frame_table[pfn];
+        page = pfn_to_page(pfn);
         ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
         ASSERT_PAGE_IS_DOMAIN(page, v->domain);
         put_page_and_type(page);
@@ -320,13 +320,13 @@
     if ( unlikely(!VALID_MFN(gmfn)) )
         return 0;
 
-    res = get_page_and_type(&frame_table[gmfn], d, PGT_ldt_page);
+    res = get_page_and_type(pfn_to_page(gmfn), d, PGT_ldt_page);
 
     if ( !res && unlikely(shadow_mode_refcounts(d)) )
     {
         shadow_lock(d);
         shadow_remove_all_write_access(d, gpfn, gmfn);
-        res = get_page_and_type(&frame_table[gmfn], d, PGT_ldt_page);
+        res = get_page_and_type(pfn_to_page(gmfn), d, PGT_ldt_page);
         shadow_unlock(d);
     }
 
@@ -344,7 +344,7 @@
 
 static int get_page_from_pagenr(unsigned long page_nr, struct domain *d)
 {
-    struct pfn_info *page = &frame_table[page_nr];
+    struct pfn_info *page = pfn_to_page(page_nr);
 
     if ( unlikely(!pfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
     {
@@ -360,7 +360,7 @@
                                          unsigned long type,
                                          struct domain *d)
 {
-    struct pfn_info *page = &frame_table[page_nr];
+    struct pfn_info *page = pfn_to_page(page_nr);
 
     if ( unlikely(!get_page_from_pagenr(page_nr, d)) )
         return 0;
@@ -412,7 +412,7 @@
          * Make sure that the mapped frame is an already-validated L2 table. 
          * If so, atomically increment the count (checking for overflow).
          */
-        page = &frame_table[pfn];
+        page = pfn_to_page(pfn);
         y = page->u.inuse.type_info;
         do {
             x = y;
@@ -435,7 +435,7 @@
     l1_pgentry_t l1e, struct domain *d)
 {
     unsigned long mfn = l1e_get_pfn(l1e);
-    struct pfn_info *page = &frame_table[mfn];
+    struct pfn_info *page = pfn_to_page(mfn);
     int okay;
     extern int domain_iomem_in_pfn(struct domain *d, unsigned long pfn);
 
@@ -587,7 +587,7 @@
 void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
 {
     unsigned long    pfn  = l1e_get_pfn(l1e);
-    struct pfn_info *page = &frame_table[pfn];
+    struct pfn_info *page = pfn_to_page(pfn);
     struct domain   *e;
     struct vcpu     *v;
 
@@ -645,7 +645,7 @@
 {
     if ( (l2e_get_flags(l2e) & _PAGE_PRESENT) && 
          (l2e_get_pfn(l2e) != pfn) )
-        put_page_and_type(&frame_table[l2e_get_pfn(l2e)]);
+        put_page_and_type(pfn_to_page(l2e_get_pfn(l2e)));
 }
 
 
@@ -655,7 +655,7 @@
 {
     if ( (l3e_get_flags(l3e) & _PAGE_PRESENT) && 
          (l3e_get_pfn(l3e) != pfn) )
-        put_page_and_type(&frame_table[l3e_get_pfn(l3e)]);
+        put_page_and_type(pfn_to_page(l3e_get_pfn(l3e)));
 }
 
 #endif
@@ -666,7 +666,7 @@
 {
     if ( (l4e_get_flags(l4e) & _PAGE_PRESENT) && 
          (l4e_get_pfn(l4e) != pfn) )
-        put_page_and_type(&frame_table[l4e_get_pfn(l4e)]);
+        put_page_and_type(pfn_to_page(l4e_get_pfn(l4e)));
 }
 
 #endif
@@ -1584,9 +1584,9 @@
         write_ptbase(v);
 
         if ( shadow_mode_refcounts(d) )
-            put_page(&frame_table[old_base_mfn]);
+            put_page(pfn_to_page(old_base_mfn));
         else
-            put_page_and_type(&frame_table[old_base_mfn]);
+            put_page_and_type(pfn_to_page(old_base_mfn));
 
         /* CR3 also holds a ref to its shadow... */
         if ( shadow_mode_enabled(d) )
@@ -1595,7 +1595,7 @@
                 put_shadow_ref(v->arch.monitor_shadow_ref);
             v->arch.monitor_shadow_ref =
                 pagetable_get_pfn(v->arch.monitor_table);
-            ASSERT(!page_get_owner(&frame_table[v->arch.monitor_shadow_ref]));
+            ASSERT(!page_get_owner(pfn_to_page(v->arch.monitor_shadow_ref)));
             get_shadow_ref(v->arch.monitor_shadow_ref);
         }
     }
@@ -1763,7 +1763,7 @@
 
         okay = 1;
         mfn  = op.arg1.mfn;
-        page = &frame_table[mfn];
+        page = pfn_to_page(mfn);
 
         switch ( op.cmd )
         {
@@ -1845,7 +1845,7 @@
                     pagetable_get_pfn(v->arch.guest_table_user);
                 v->arch.guest_table_user = mk_pagetable(mfn << PAGE_SHIFT);
                 if ( old_mfn != 0 )
-                    put_page_and_type(&frame_table[old_mfn]);
+                    put_page_and_type(pfn_to_page(old_mfn));
             }
             break;
 #endif
@@ -2145,7 +2145,7 @@
             va = map_domain_page_with_cache(mfn, &mapcache);
             va = (void *)((unsigned long)va +
                           (unsigned long)(req.ptr & ~PAGE_MASK));
-            page = &frame_table[mfn];
+            page = pfn_to_page(mfn);
 
             switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask )
             {
@@ -2285,7 +2285,7 @@
 
             mark_dirty(FOREIGNDOM, mfn);
 
-            put_page(&frame_table[mfn]);
+            put_page(pfn_to_page(mfn));
             break;
 
         default:
@@ -2728,7 +2728,7 @@
     for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
     {
         if ( (pfn = l1e_get_pfn(v->arch.perdomain_ptes[i])) != 0 )
-            put_page_and_type(&frame_table[pfn]);
+            put_page_and_type(pfn_to_page(pfn));
         v->arch.perdomain_ptes[i] = l1e_empty();
         v->arch.guest_context.gdt_frames[i] = 0;
     }
@@ -2753,7 +2753,7 @@
     for ( i = 0; i < nr_pages; i++ ) {
         pfn = frames[i];
         if ((pfn >= max_page) ||
-            !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
+            !get_page_and_type(pfn_to_page(pfn), d, PGT_gdt_page) )
             goto fail;
     }
 
@@ -2773,7 +2773,7 @@
 
  fail:
     while ( i-- > 0 )
-        put_page_and_type(&frame_table[frames[i]]);
+        put_page_and_type(pfn_to_page(frames[i]));
     return -EINVAL;
 }
 
@@ -2827,7 +2827,7 @@
         return -EINVAL;
     }
 
-    page = &frame_table[mfn];
+    page = pfn_to_page(mfn);
     if ( unlikely(!get_page(page, dom)) )
     {
         UNLOCK_BIGLOCK(dom);
@@ -3037,7 +3037,7 @@
         if ( likely(l1e_get_intpte(ol1e) == (l1e_get_intpte(nl1e)|_PAGE_RW)) )
         {
             if ( likely(l1e_get_flags(nl1e) & _PAGE_PRESENT) )
-                put_page_type(&frame_table[l1e_get_pfn(nl1e)]);
+                put_page_type(pfn_to_page(l1e_get_pfn(nl1e)));
             continue;
         }
 
@@ -3220,7 +3220,7 @@
     }
 
     pfn  = l1e_get_pfn(pte);
-    page = &frame_table[pfn];
+    page = pfn_to_page(pfn);
 
     /* We are looking only for read-only mappings of p.t. pages. */
     if ( ((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) != _PAGE_PRESENT) ||
@@ -3331,7 +3331,7 @@
     }
 
     pfn  = l1e_get_pfn(pte);
-    page = &frame_table[pfn];
+    page = pfn_to_page(pfn);
 
 #ifdef CONFIG_X86_64
 #define WRPT_PTE_FLAGS (_PAGE_RW | _PAGE_PRESENT | _PAGE_USER)
diff -r 1283d309a603 -r 3d1c7be170a7 xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Sun Dec 18 19:29:43 2005
+++ b/xen/arch/x86/shadow.c     Tue Dec 20 11:46:56 2005
@@ -504,7 +504,7 @@
             l2e_from_pfn(smfn, __PAGE_HYPERVISOR);
 
         spl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
-            l2e_from_paddr(__pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt),
+            l2e_from_paddr(__pa(page_get_owner(pfn_to_page(gmfn))->arch.mm_perdomain_pt),
                             __PAGE_HYPERVISOR);
 
         if ( shadow_mode_translate(d) ) // NB: not external
@@ -670,7 +670,7 @@
             set_guest_back_ptr(d, sl1e, sl1mfn, i);
         }
 
-        frame_table[sl1mfn].tlbflush_timestamp =
+        pfn_to_page(sl1mfn)->tlbflush_timestamp =
             SHADOW_ENCODE_MIN_MAX(min, max);
 
         unmap_domain_page(gpl1e);
@@ -907,7 +907,7 @@
     u32 min_max = 0;
     int min, max, length;
 
-    if ( test_and_set_bit(_PGC_out_of_sync, &frame_table[gmfn].count_info) )
+    if ( test_and_set_bit(_PGC_out_of_sync, &pfn_to_page(gmfn)->count_info) )
     {
         ASSERT(__shadow_status(d, gpfn, PGT_snapshot));
         return SHADOW_SNAPSHOT_ELSEWHERE;
@@ -953,7 +953,7 @@
                              unsigned long mfn)
 {
     struct domain *d = v->domain;
-    struct pfn_info *page = &frame_table[mfn];
+    struct pfn_info *page = pfn_to_page(mfn);
     struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
 
     ASSERT(shadow_lock_is_acquired(d));
@@ -1174,7 +1174,7 @@
                 && i == PAGING_L4)
                 continue;       /* skip the top-level for 3-level */
 
-            if ( page_out_of_sync(&frame_table[gmfn]) &&
+            if ( page_out_of_sync(pfn_to_page(gmfn)) &&
                  !snapshot_entry_matches(
                      d, guest_pt, gpfn, table_offset_64(va, i)) )
             {
@@ -1200,7 +1200,7 @@
         }
 
         /* L2 */
-        if ( page_out_of_sync(&frame_table[gmfn]) &&
+        if ( page_out_of_sync(pfn_to_page(gmfn)) &&
              !snapshot_entry_matches(d, guest_pt, gpfn, l2_table_offset(va)) )
         {
             unmap_and_return (1);
@@ -1214,7 +1214,7 @@
 #undef unmap_and_return
 #endif /* CONFIG_PAGING_LEVELS >= 3 */
     {
-        if ( page_out_of_sync(&frame_table[l2mfn]) &&
+        if ( page_out_of_sync(pfn_to_page(l2mfn)) &&
             !snapshot_entry_matches(d, (guest_l1_pgentry_t *)v->arch.guest_vtable,
                                      l2pfn, guest_l2_table_offset(va)) )
             return 1;
@@ -1234,7 +1234,7 @@
 
     guest_pt = (guest_l1_pgentry_t *) map_domain_page(l1mfn);
 
-    if ( page_out_of_sync(&frame_table[l1mfn]) &&
+    if ( page_out_of_sync(pfn_to_page(l1mfn)) &&
          !snapshot_entry_matches(
              d, guest_pt, l1pfn, guest_l1_table_offset(va)) ) 
     {
@@ -1324,18 +1324,18 @@
     int i;
     u32 found = 0;
     int is_l1_shadow =
-        ((frame_table[pt_mfn].u.inuse.type_info & PGT_type_mask) ==
+        ((pfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
          PGT_l1_shadow);
 #if CONFIG_PAGING_LEVELS == 4
     is_l1_shadow |=
-      ((frame_table[pt_mfn].u.inuse.type_info & PGT_type_mask) ==
+      ((pfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
                 PGT_fl1_shadow);
 #endif
 
     match = l1e_from_pfn(readonly_gmfn, flags);
 
     if ( shadow_mode_external(d) ) {
-        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask)
+        i = (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask)
             >> PGT_va_shift;
 
         if ( (i >= 0 && i < L1_PAGETABLE_ENTRIES) &&
@@ -1373,7 +1373,7 @@
 
     // If it's not a writable page, then no writable refs can be outstanding.
     //
-    if ( (frame_table[readonly_gmfn].u.inuse.type_info & PGT_type_mask) !=
+    if ( (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
          PGT_writable_page )
     {
         perfc_incrc(remove_write_not_writable);
@@ -1383,7 +1383,7 @@
     // How many outstanding writable PTEs for this page are there?
     //
     write_refs =
-        (frame_table[readonly_gmfn].u.inuse.type_info & PGT_count_mask);
+        (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
     if ( write_refs && MFN_PINNED(readonly_gmfn) )
     {
         write_refs--;
@@ -1401,7 +1401,7 @@
 
          // Use the back pointer to locate the shadow page that can contain
          // the PTE of interest
-         if ( (predicted_smfn = frame_table[readonly_gmfn].tlbflush_timestamp) ) {
+         if ( (predicted_smfn = pfn_to_page(readonly_gmfn)->tlbflush_timestamp) ) {
              found += remove_all_write_access_in_ptpage(
                  d, predicted_smfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, 0);
              if ( found == write_refs )
@@ -1670,7 +1670,7 @@
                     if ( !(entry_get_flags(guest_pt[i]) & _PAGE_PRESENT) &&
                          unlikely(entry_get_value(guest_pt[i]) != 0) &&
                          !unshadow &&
-                         (frame_table[smfn].u.inuse.type_info & PGT_pinned) )
+                         (pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
                         unshadow = 1;
                 }
 #endif
@@ -1718,7 +1718,7 @@
                 if ( !(guest_root_get_flags(new_root_e) & _PAGE_PRESENT) &&
                      unlikely(guest_root_get_intpte(new_root_e) != 0) &&
                      !unshadow &&
-                     (frame_table[smfn].u.inuse.type_info & PGT_pinned) )
+                     (pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
                     unshadow = 1;
             }
             if ( max == -1 )
@@ -2401,7 +2401,7 @@
     {
         printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08lx page_table_page=%d\n",
                eff_guest_pfn, eff_guest_mfn, shadow_mfn,
-               frame_table[eff_guest_mfn].u.inuse.type_info,
+               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
                page_table_page);
         FAIL("RW coherence");
     }
@@ -2412,7 +2412,7 @@
     {
         printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08lx page_table_page=%d\n",
                eff_guest_pfn, eff_guest_mfn, shadow_mfn,
-               frame_table[eff_guest_mfn].u.inuse.type_info,
+               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
                page_table_page);
         FAIL("RW2 coherence");
     }
@@ -2781,7 +2781,7 @@
          * When we free L2 pages, we need to tell if the page contains
          * Xen private mappings. Use the va_mask part.
          */
-        frame_table[s2mfn].u.inuse.type_info |= 
+        pfn_to_page(s2mfn)->u.inuse.type_info |= 
             (unsigned long) 3 << PGT_score_shift; 
 
         memset(spl2e, 0, 
@@ -2794,7 +2794,7 @@
         for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
             spl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
                 l2e_from_page(
-                    virt_to_page(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt) + i,
+                    virt_to_page(page_get_owner(pfn_to_page(gmfn))->arch.mm_perdomain_pt) + i,
                     __PAGE_HYPERVISOR);
         for ( i = 0; i < (LINEARPT_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
             spl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
@@ -2896,7 +2896,7 @@
            ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
 
         spl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
-            l4e_from_paddr(__pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_l3),
+            l4e_from_paddr(__pa(page_get_owner(pfn_to_page(gmfn))->arch.mm_perdomain_l3),
                             __PAGE_HYPERVISOR);
 
         if ( shadow_mode_translate(d) ) // NB: not external
diff -r 1283d309a603 -r 3d1c7be170a7 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Sun Dec 18 19:29:43 2005
+++ b/xen/arch/x86/shadow32.c   Tue Dec 20 11:46:56 2005
@@ -30,7 +30,7 @@
 #include <xen/sched.h>
 #include <xen/trace.h>
 
-#define MFN_PINNED(_x) (frame_table[_x].u.inuse.type_info & PGT_pinned)
+#define MFN_PINNED(_x) (pfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
 #define va_to_l1mfn(_ed, _va) \
     (l2e_get_pfn(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]))
 
@@ -144,11 +144,11 @@
     if ( !shadow_mode_refcounts(d) )
         return;
 
-    ASSERT(frame_table[gmfn].count_info & PGC_page_table);
+    ASSERT(pfn_to_page(gmfn)->count_info & PGC_page_table);
 
     if ( shadow_max_pgtable_type(d, gpfn, NULL) == PGT_none )
     {
-        clear_bit(_PGC_page_table, &frame_table[gmfn].count_info);
+        clear_bit(_PGC_page_table, &pfn_to_page(gmfn)->count_info);
 
         if ( page_out_of_sync(pfn_to_page(gmfn)) )
         {
@@ -380,7 +380,7 @@
 
 void free_shadow_page(unsigned long smfn)
 {
-    struct pfn_info *page = &frame_table[smfn];
+    struct pfn_info *page = pfn_to_page(smfn);
     unsigned long gmfn = page->u.inuse.type_info & PGT_mfn_mask;
     struct domain *d = page_get_owner(pfn_to_page(gmfn));
     unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
@@ -465,8 +465,8 @@
 {
     struct pfn_info *page;
 
-    page = &frame_table[entry->gmfn];
-        
+    page = pfn_to_page(entry->gmfn);
+
     // Decrement ref count of guest & shadow pages
     //
     put_page(page);
@@ -795,7 +795,7 @@
      */
     mfn = pagetable_get_pfn(v->arch.monitor_table);
     unmap_domain_page(v->arch.monitor_vtable);
-    free_domheap_page(&frame_table[mfn]);
+    free_domheap_page(pfn_to_page(mfn));
 
     v->arch.monitor_table = mk_pagetable(0);
     v->arch.monitor_vtable = 0;
@@ -1018,8 +1018,8 @@
         {
             // external guests provide their own memory for their P2M maps.
             //
-            ASSERT( d == page_get_owner(
-                        &frame_table[pagetable_get_pfn(d->arch.phys_table)]) );
+            ASSERT(d == page_get_owner(pfn_to_page(pagetable_get_pfn(
+                d->arch.phys_table))));
         }
     }
 
@@ -1543,7 +1543,7 @@
             l2e_from_pfn(smfn, __PAGE_HYPERVISOR);
 
         spl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
-            l2e_from_paddr(__pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt),
+            l2e_from_paddr(__pa(page_get_owner(pfn_to_page(gmfn))->arch.mm_perdomain_pt),
                             __PAGE_HYPERVISOR);
 
         if ( shadow_mode_translate(d) ) // NB: not external
@@ -1675,7 +1675,7 @@
             set_guest_back_ptr(d, sl1e, sl1mfn, i);
         }
 
-        frame_table[sl1mfn].tlbflush_timestamp =
+        pfn_to_page(sl1mfn)->tlbflush_timestamp =
             SHADOW_ENCODE_MIN_MAX(min, max);
     }
 }
@@ -1758,7 +1758,7 @@
     u32 min_max = 0;
     int min, max, length;
 
-    if ( test_and_set_bit(_PGC_out_of_sync, &frame_table[gmfn].count_info) )
+    if ( test_and_set_bit(_PGC_out_of_sync, &pfn_to_page(gmfn)->count_info) )
     {
         ASSERT(__shadow_status(d, gpfn, PGT_snapshot));
         return SHADOW_SNAPSHOT_ELSEWHERE;
@@ -1809,7 +1809,7 @@
 
     // Clear the out_of_sync bit.
     //
-    clear_bit(_PGC_out_of_sync, &frame_table[entry->gmfn].count_info);
+    clear_bit(_PGC_out_of_sync, &pfn_to_page(entry->gmfn)->count_info);
 
     // XXX Need to think about how to protect the domain's
     // information less expensively.
@@ -1826,7 +1826,7 @@
                              unsigned long mfn)
 {
     struct domain *d = v->domain;
-    struct pfn_info *page = &frame_table[mfn];
+    struct pfn_info *page = pfn_to_page(mfn);
     struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
 
     ASSERT(shadow_lock_is_acquired(d));
@@ -1992,7 +1992,7 @@
 
     perfc_incrc(shadow_out_of_sync_calls);
 
-    if ( page_out_of_sync(&frame_table[l2mfn]) &&
+    if ( page_out_of_sync(pfn_to_page(l2mfn)) &&
          !snapshot_entry_matches(d, (l1_pgentry_t *)v->arch.guest_vtable,
                                  l2pfn, l2_table_offset(va)) )
         return 1;
@@ -2008,7 +2008,7 @@
     if ( !VALID_MFN(l1mfn) )
         return 0;
 
-    if ( page_out_of_sync(&frame_table[l1mfn]) &&
+    if ( page_out_of_sync(pfn_to_page(l1mfn)) &&
          !snapshot_entry_matches(
             d, &linear_pg_table[l1_linear_offset(va) & ~(L1_PAGETABLE_ENTRIES-1)],
              l1pfn, l1_table_offset(va)) )
@@ -2136,13 +2136,13 @@
     int i;
     u32 found = 0;
     int is_l1_shadow =
-        ((frame_table[pt_mfn].u.inuse.type_info & PGT_type_mask) ==
+        ((pfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
          PGT_l1_shadow);
 
     match = l1e_from_pfn(readonly_gmfn, flags);
 
     if ( shadow_mode_external(d) ) {
-        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask) 
+        i = (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask) 
             >> PGT_va_shift;
 
         if ( (i >= 0 && i < L1_PAGETABLE_ENTRIES) &&
@@ -2180,7 +2180,7 @@
 
     // If it's not a writable page, then no writable refs can be outstanding.
     //
-    if ( (frame_table[readonly_gmfn].u.inuse.type_info & PGT_type_mask) !=
+    if ( (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
          PGT_writable_page )
     {
         perfc_incrc(remove_write_not_writable);
@@ -2190,7 +2190,7 @@
     // How many outstanding writable PTEs for this page are there?
     //
     write_refs =
-        (frame_table[readonly_gmfn].u.inuse.type_info & PGT_count_mask);
+        (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
     if ( write_refs && MFN_PINNED(readonly_gmfn) )
     {
         write_refs--;
@@ -2208,7 +2208,7 @@
 
          // Use the back pointer to locate the shadow page that can contain
          // the PTE of interest
-         if ( (predicted_smfn = frame_table[readonly_gmfn].tlbflush_timestamp) ) {
+         if ( (predicted_smfn = pfn_to_page(readonly_gmfn)->tlbflush_timestamp) ) {
              found += remove_all_write_access_in_ptpage(
                  d, predicted_smfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, 0);
              if ( found == write_refs )
@@ -2249,7 +2249,7 @@
     int i;
     u32 count = 0;
     int is_l1_shadow =
-        ((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) ==
+        ((pfn_to_page(l1mfn)->u.inuse.type_info & PGT_type_mask) ==
          PGT_l1_shadow);
 
     match = l1e_from_pfn(forbidden_gmfn, flags);
@@ -2266,7 +2266,7 @@
         if ( is_l1_shadow )
             shadow_put_page_from_l1e(ol2e, d);
         else /* must be an hl2 page */
-            put_page(&frame_table[forbidden_gmfn]);
+            put_page(pfn_to_page(forbidden_gmfn));
     }
 
     unmap_domain_page(pl1e);
@@ -3156,7 +3156,7 @@
     {
         printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=%lx page_table_page=%d\n",
                eff_guest_pfn, eff_guest_mfn, shadow_mfn,
-               frame_table[eff_guest_mfn].u.inuse.type_info,
+               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
                page_table_page);
         FAIL("RW coherence");
     }
@@ -3167,7 +3167,7 @@
     {
         printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=%lx page_table_page=%d\n",
                eff_guest_pfn, eff_guest_mfn, shadow_mfn,
-               frame_table[eff_guest_mfn].u.inuse.type_info,
+               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
                page_table_page);
         FAIL("RW2 coherence");
     }
diff -r 1283d309a603 -r 3d1c7be170a7 xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c      Sun Dec 18 19:29:43 2005
+++ b/xen/arch/x86/shadow_public.c      Tue Dec 20 11:46:56 2005
@@ -168,14 +168,14 @@
 #if CONFIG_PAGING_LEVELS >=3
     if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
     {
-        struct pfn_info *page = &frame_table[smfn];
+        struct pfn_info *page = pfn_to_page(smfn);
         for ( i = 0; i < PDP_ENTRIES; i++ )
         {
             if ( entry_get_flags(ple[i]) & _PAGE_PRESENT )
                 free_fake_shadow_l2(d,entry_get_pfn(ple[i]));
         }
 
-        page = &frame_table[entry_get_pfn(ple[0])];
+        page = pfn_to_page(entry_get_pfn(ple[0]));
         free_domheap_pages(page, SL2_ORDER);
         unmap_domain_page(ple);
     }
@@ -208,7 +208,7 @@
                     break;
                 if ( level == PAGING_L2 )
                 {
-                    struct pfn_info *page = &frame_table[smfn]; 
+                    struct pfn_info *page = pfn_to_page(smfn);
                     if ( is_xen_l2_slot(page->u.inuse.type_info, i) )
                         continue;
                 }
@@ -299,7 +299,7 @@
      */
     mfn = pagetable_get_pfn(v->arch.monitor_table);
     unmap_domain_page(v->arch.monitor_vtable);
-    free_domheap_page(&frame_table[mfn]);
+    free_domheap_page(pfn_to_page(mfn));
 
     v->arch.monitor_table = mk_pagetable(0);
     v->arch.monitor_vtable = 0;
@@ -394,7 +394,7 @@
      */
     mfn = pagetable_get_pfn(v->arch.monitor_table);
     unmap_domain_page(v->arch.monitor_vtable);
-    free_domheap_page(&frame_table[mfn]);
+    free_domheap_page(pfn_to_page(mfn));
 
     v->arch.monitor_table = mk_pagetable(0);
     v->arch.monitor_vtable = 0;
@@ -411,7 +411,7 @@
 
     // Clear the out_of_sync bit.
     //
-    clear_bit(_PGC_out_of_sync, &frame_table[entry->gmfn].count_info);
+    clear_bit(_PGC_out_of_sync, &pfn_to_page(entry->gmfn)->count_info);
 
     // XXX Need to think about how to protect the domain's
     // information less expensively.
@@ -428,7 +428,7 @@
 {
     struct pfn_info *page;
 
-    page = &frame_table[entry->gmfn];
+    page = pfn_to_page(entry->gmfn);
         
     // Decrement ref count of guest & shadow pages
     //
@@ -501,11 +501,11 @@
     if ( !shadow_mode_refcounts(d) )
         return;
 
-    ASSERT(frame_table[gmfn].count_info & PGC_page_table);
+    ASSERT(pfn_to_page(gmfn)->count_info & PGC_page_table);
 
     if ( shadow_max_pgtable_type(d, gpfn, NULL) == PGT_none )
     {
-        clear_bit(_PGC_page_table, &frame_table[gmfn].count_info);
+        clear_bit(_PGC_page_table, &pfn_to_page(gmfn)->count_info);
 
         if ( page_out_of_sync(pfn_to_page(gmfn)) )
         {
@@ -600,7 +600,7 @@
 
 void free_shadow_page(unsigned long smfn)
 {
-    struct pfn_info *page = &frame_table[smfn];
+    struct pfn_info *page = pfn_to_page(smfn);
 
     unsigned long gmfn = page->u.inuse.type_info & PGT_mfn_mask;
     struct domain *d = page_get_owner(pfn_to_page(gmfn));
@@ -1067,8 +1067,8 @@
         {
             // external guests provide their own memory for their P2M maps.
             //
-            ASSERT( d == page_get_owner(
-                &frame_table[pagetable_get_pfn(d->arch.phys_table)]) );
+            ASSERT(d == page_get_owner(pfn_to_page(pagetable_get_pfn(
+                d->arch.phys_table))));
         }
     }
 
@@ -1643,7 +1643,7 @@
     int i;
     u32 count = 0;
     int is_l1_shadow =
-        ((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) ==
+        ((pfn_to_page(l1mfn)->u.inuse.type_info & PGT_type_mask) ==
          PGT_l1_shadow);
 
     match = l1e_from_pfn(forbidden_gmfn, flags);
@@ -1660,7 +1660,7 @@
         if ( is_l1_shadow )
             shadow_put_page_from_l1e(ol2e, d);
         else /* must be an hl2 page */
-            put_page(&frame_table[forbidden_gmfn]);
+            put_page(pfn_to_page(forbidden_gmfn));
     }
 
     unmap_domain_page(pl1e);
diff -r 1283d309a603 -r 3d1c7be170a7 xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Sun Dec 18 19:29:43 2005
+++ b/xen/arch/x86/x86_32/mm.c  Tue Dec 20 11:46:56 2005
@@ -177,10 +177,11 @@
             idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i]);
         for ( j = 0; j < L2_PAGETABLE_ENTRIES; j++ )
         {
-            frame_table[m2p_start_mfn+j].count_info = PGC_allocated | 1;
+            struct pfn_info *page = pfn_to_page(m2p_start_mfn + j);
+            page->count_info = PGC_allocated | 1;
             /* Ensure it's only mapped read-only by domains. */
-            frame_table[m2p_start_mfn+j].u.inuse.type_info = PGT_gdt_page | 1;
-            page_set_owner(&frame_table[m2p_start_mfn+j], dom_xen);
+            page->u.inuse.type_info = PGT_gdt_page | 1;
+            page_set_owner(page, dom_xen);
         }
     }
 }
diff -r 1283d309a603 -r 3d1c7be170a7 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Sun Dec 18 19:29:43 2005
+++ b/xen/arch/x86/x86_64/mm.c  Tue Dec 20 11:46:56 2005
@@ -166,11 +166,12 @@
 
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
-            frame_table[m2p_start_mfn+i].count_info = PGC_allocated | 1;
+            struct pfn_info *page = pfn_to_page(m2p_start_mfn + i);
+            page->count_info = PGC_allocated | 1;
             /* gdt to make sure it's only mapped read-only by non-privileged
                domains. */
-            frame_table[m2p_start_mfn+i].u.inuse.type_info = PGT_gdt_page | 1;
-            page_set_owner(&frame_table[m2p_start_mfn+i], dom_xen);
+            page->u.inuse.type_info = PGT_gdt_page | 1;
+            page_set_owner(page, dom_xen);
         }
     }
 }
diff -r 1283d309a603 -r 3d1c7be170a7 xen/common/grant_table.c
--- a/xen/common/grant_table.c  Sun Dec 18 19:29:43 2005
+++ b/xen/common/grant_table.c  Tue Dec 20 11:46:56 2005
@@ -238,8 +238,8 @@
 
         if ( unlikely(!pfn_valid(frame)) ||
              unlikely(!((dev_hst_ro_flags & GNTMAP_readonly) ?
-                        get_page(&frame_table[frame], rd) :
-                        get_page_and_type(&frame_table[frame], rd,
+                        get_page(pfn_to_page(frame), rd) :
+                        get_page_and_type(pfn_to_page(frame), rd,
                                           PGT_writable_page))) )
         {
             clear_bit(_GTF_writing, &sha->flags);
@@ -301,7 +301,7 @@
                 sflags = prev_sflags;
             }
 
-            if ( unlikely(!get_page_type(&frame_table[frame],
+            if ( unlikely(!get_page_type(pfn_to_page(frame),
                                          PGT_writable_page)) )
             {
                 clear_bit(_GTF_writing, &sha->flags);
@@ -347,14 +347,14 @@
                 if ( (act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) == 0 )
                 {
                     clear_bit(_GTF_writing, &sha->flags);
-                    put_page_type(&frame_table[frame]);
+                    put_page_type(pfn_to_page(frame));
                 }
             }
 
             if ( act->pin == 0 )
             {
                 clear_bit(_GTF_reading, &sha->flags);
-                put_page(&frame_table[frame]);
+                put_page(pfn_to_page(frame));
             }
 
             spin_unlock(&rd->grant_table->lock);
@@ -500,14 +500,14 @@
          !(flags & GNTMAP_readonly) )
     {
         clear_bit(_GTF_writing, &sha->flags);
-        put_page_type(&frame_table[frame]);
+        put_page_type(pfn_to_page(frame));
     }
 
     if ( act->pin == 0 )
     {
         act->frame = 0xdeadbeef;
         clear_bit(_GTF_reading, &sha->flags);
-        put_page(&frame_table[frame]);
+        put_page(pfn_to_page(frame));
     }
 
  unmap_out:
@@ -691,7 +691,7 @@
         }
 
         /* Check the passed page frame for basic validity. */
-        page = &frame_table[gop.mfn];
+        page = pfn_to_page(gop.mfn);
         if ( unlikely(!pfn_valid(gop.mfn) || IS_XEN_HEAP_FRAME(page)) )
         { 
             DPRINTK("gnttab_transfer: out-of-range or xen frame %lx\n",
@@ -1016,14 +1016,14 @@
             if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
             {
                 clear_bit(_GTF_writing, &sha->flags);
-                put_page_type(&frame_table[act->frame]);
+                put_page_type(pfn_to_page(act->frame));
             }
         }
 
         if ( act->pin == 0 )
         {
             clear_bit(_GTF_reading, &sha->flags);
-            put_page(&frame_table[act->frame]);
+            put_page(pfn_to_page(act->frame));
         }
 
         spin_unlock(&rd->grant_table->lock);
diff -r 1283d309a603 -r 3d1c7be170a7 xen/common/memory.c
--- a/xen/common/memory.c       Sun Dec 18 19:29:43 2005
+++ b/xen/common/memory.c       Tue Dec 20 11:46:56 2005
@@ -102,7 +102,7 @@
                 return i;
             }
             
-            page = &frame_table[mpfn + j];
+            page = pfn_to_page(mpfn + j);
             if ( unlikely(!get_page(page, d)) )
             {
                 DPRINTK("Bad page free for domain %u\n", d->domain_id);
diff -r 1283d309a603 -r 3d1c7be170a7 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Sun Dec 18 19:29:43 2005
+++ b/xen/include/asm-x86/shadow.h      Tue Dec 20 11:46:56 2005
@@ -493,9 +493,9 @@
         SH_VLOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (dom %p)",
                mfn, pfn, d->arch.shadow_dirty_bitmap_size, d);
         SH_VLOG("dom=%p caf=%08x taf=%" PRtype_info, 
-               page_get_owner(&frame_table[mfn]),
-               frame_table[mfn].count_info, 
-               frame_table[mfn].u.inuse.type_info );
+                page_get_owner(pfn_to_page(mfn)),
+                pfn_to_page(mfn)->count_info, 
+                pfn_to_page(mfn)->u.inuse.type_info );
     }
 #endif
 }
@@ -648,20 +648,20 @@
 
     ASSERT(pfn_valid(smfn));
 
-    x = frame_table[smfn].count_info;
+    x = pfn_to_page(smfn)->count_info;
     nx = x + 1;
 
     if ( unlikely(nx == 0) )
     {
         printk("get_shadow_ref overflow, gmfn=%" PRtype_info  " smfn=%lx\n",
-               frame_table[smfn].u.inuse.type_info & PGT_mfn_mask,
+               pfn_to_page(smfn)->u.inuse.type_info & PGT_mfn_mask,
                smfn);
         BUG();
     }
     
     // Guarded by the shadow lock...
     //
-    frame_table[smfn].count_info = nx;
+    pfn_to_page(smfn)->count_info = nx;
 
     return 1;
 }
@@ -678,7 +678,7 @@
 
     ASSERT(pfn_valid(smfn));
 
-    x = frame_table[smfn].count_info;
+    x = pfn_to_page(smfn)->count_info;
     nx = x - 1;
 
     if ( unlikely(x == 0) )
@@ -686,14 +686,14 @@
         printk("put_shadow_ref underflow, smfn=%lx oc=%08x t=%" 
                PRtype_info "\n",
                smfn,
-               frame_table[smfn].count_info,
-               frame_table[smfn].u.inuse.type_info);
+               pfn_to_page(smfn)->count_info,
+               pfn_to_page(smfn)->u.inuse.type_info);
         BUG();
     }
 
     // Guarded by the shadow lock...
     //
-    frame_table[smfn].count_info = nx;
+    pfn_to_page(smfn)->count_info = nx;
 
     if ( unlikely(nx == 0) )
     {
@@ -704,9 +704,9 @@
 static inline void
 shadow_pin(unsigned long smfn)
 {
-    ASSERT( !(frame_table[smfn].u.inuse.type_info & PGT_pinned) );
-
-    frame_table[smfn].u.inuse.type_info |= PGT_pinned;
+    ASSERT( !(pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) );
+
+    pfn_to_page(smfn)->u.inuse.type_info |= PGT_pinned;
     if ( unlikely(!get_shadow_ref(smfn)) )
         BUG();
 }
@@ -714,9 +714,9 @@
 static inline void
 shadow_unpin(unsigned long smfn)
 {
-    ASSERT( (frame_table[smfn].u.inuse.type_info & PGT_pinned) );
-
-    frame_table[smfn].u.inuse.type_info &= ~PGT_pinned;
+    ASSERT( (pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) );
+
+    pfn_to_page(smfn)->u.inuse.type_info &= ~PGT_pinned;
     put_shadow_ref(smfn);
 }
 
@@ -732,9 +732,9 @@
 
         ASSERT(shadow_lock_is_acquired(d));
         gmfn = l1e_get_pfn(spte);
-        frame_table[gmfn].tlbflush_timestamp = smfn;
-        frame_table[gmfn].u.inuse.type_info &= ~PGT_va_mask;
-        frame_table[gmfn].u.inuse.type_info |= (unsigned long) index << PGT_va_shift;
+        pfn_to_page(gmfn)->tlbflush_timestamp = smfn;
+        pfn_to_page(gmfn)->u.inuse.type_info &= ~PGT_va_mask;
+        pfn_to_page(gmfn)->u.inuse.type_info |= (unsigned long) index << PGT_va_shift;
     }
 }
 
@@ -941,7 +941,7 @@
             //
             perfc_incrc(validate_pte_changes2);
             if ( likely(l1e_get_flags(new_spte) & _PAGE_PRESENT) )
-                shadow_put_page_type(d, &frame_table[l1e_get_pfn(new_spte)]);
+                shadow_put_page_type(d, pfn_to_page(l1e_get_pfn(new_spte)));
         }
         else if ( ((l1e_get_flags(old_spte) | l1e_get_flags(new_spte)) &
                    _PAGE_PRESENT ) &&
@@ -1216,8 +1216,8 @@
             printk("d->id=%d gpfn=%lx gmfn=%lx stype=%lx c=%x t=%" PRtype_info " "
                    "mfn_out_of_sync(gmfn)=%d mfn_is_page_table(gmfn)=%d\n",
                    d->domain_id, gpfn, gmfn, stype,
-                   frame_table[gmfn].count_info,
-                   frame_table[gmfn].u.inuse.type_info,
+                   pfn_to_page(gmfn)->count_info,
+                   pfn_to_page(gmfn)->u.inuse.type_info,
                    mfn_out_of_sync(gmfn), mfn_is_page_table(gmfn));
             BUG();
         }
@@ -1597,7 +1597,7 @@
     struct vcpu *v = current;
     struct domain *d = v->domain;
     unsigned long mfn = __gpfn_to_mfn(d, gpfn);
-    u32 type = frame_table[mfn].u.inuse.type_info & PGT_type_mask;
+    u32 type = pfn_to_page(mfn)->u.inuse.type_info & PGT_type_mask;
 
     if ( shadow_mode_refcounts(d) &&
          (type == PGT_writable_page) )
diff -r 1283d309a603 -r 3d1c7be170a7 xen/include/asm-x86/shadow_public.h
--- a/xen/include/asm-x86/shadow_public.h       Sun Dec 18 19:29:43 2005
+++ b/xen/include/asm-x86/shadow_public.h       Tue Dec 20 11:46:56 2005
@@ -22,7 +22,7 @@
 #ifndef _XEN_SHADOW_PUBLIC_H
 #define _XEN_SHADOW_PUBLIC_H
 #if CONFIG_PAGING_LEVELS >= 3
-#define MFN_PINNED(_x) (frame_table[_x].u.inuse.type_info & PGT_pinned)
+#define MFN_PINNED(_x) (pfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
 
 extern int alloc_p2m_table(struct domain *d);
 
