[Xen-changelog] Rename physical-address-related variables and functions



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 0c94043f5c5b845a2b0731c444aec09ef7a901f4
# Parent  a12e08eb0209f54b299c17f14ca6aec908d6fae8
Rename physical-address-related variables and functions
to follow a new consistent naming scheme.

gpfn is a guest pseudophys frame number.
gmfn is a machine frame number (from the guest's point of view).
mfn is a real bona fide machine frame number.
pfn is an arbitrary frame number (used in general-purpose
'polymorphic' functions).

struct pfn_info is now called struct page_info, and helpers are renamed
to match: pfn_to_page()/page_to_pfn() become mfn_to_page()/page_to_mfn(),
and virt_to_phys()/page_to_phys()/phys_to_page() become
virt_to_maddr()/page_to_maddr()/maddr_to_page().
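
As an illustration only (not part of this changeset), here is a minimal
sketch of how the renamed helpers compose under the new scheme, modeled
on the vmmu.c hunk below; the function name and exact signature are
hypothetical:

/* Illustrative sketch only -- not in the patch. Translates a guest
 * physical address to a machine address using the renamed helpers. */
static unsigned long gpaddr_to_maddr_sketch(struct domain *d,
                                            unsigned long gpaddr)
{
    unsigned long gmfn = gpaddr >> PAGE_SHIFT;  /* frame no., guest p.o.v.    */
    unsigned long mfn  = gmfn_to_mfn(d, gmfn);  /* real machine frame number  */
    struct page_info *page;

    if ( mfn == INVALID_MFN )                   /* no valid translation;      */
        return 0;                               /* bail out as vmmu.c does    */

    page = mfn_to_page(mfn);                    /* frame-table entry          */
    ASSERT(page_to_mfn(page) == mfn);           /* page <-> mfn round trip    */

    return (mfn << PAGE_SHIFT) | (gpaddr & (PAGE_SIZE - 1));
}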

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/ia64/linux-xen/mm_contig.c
--- a/xen/arch/ia64/linux-xen/mm_contig.c       Wed Feb  1 15:01:04 2006
+++ b/xen/arch/ia64/linux-xen/mm_contig.c       Wed Feb  1 15:28:50 2006
@@ -48,7 +48,7 @@
        printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        i = max_mapnr;
        while (i-- > 0) {
-               if (!pfn_valid(i))
+               if (!mfn_valid(i))
                        continue;
                total++;
                if (PageReserved(mem_map+i))
@@ -253,7 +253,7 @@
        num_physpages = 0;
        efi_memmap_walk(count_pages, &num_physpages);
 
-       max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+       max_dma = virt_to_maddr((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
        memset(zholes_size, 0, sizeof(zholes_size));
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Wed Feb  1 15:01:04 2006
+++ b/xen/arch/ia64/vmx/vmmu.c  Wed Feb  1 15:28:50 2006
@@ -64,10 +64,10 @@
         d = find_domain_by_id(domid);
     }
     xen_gppn = arch_ppn_to_xen_ppn(gpfn);
-    xen_mppn = __gpfn_to_mfn(d, xen_gppn);
+    xen_mppn = gmfn_to_mfn(d, xen_gppn);
 /*
     for (i=0; i<pages; i++) {
-        if ( __gpfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
+        if ( gmfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
             return INVALID_MFN;
         }
     }
@@ -143,7 +143,7 @@
 
 static thash_cb_t *init_domain_vhpt(struct vcpu *d)
 {
-    struct pfn_info *page;
+    struct page_info *page;
     void   *vbase,*vcur;
     vhpt_special *vs;
     thash_cb_t  *vhpt;
@@ -188,7 +188,7 @@
 
 thash_cb_t *init_domain_tlb(struct vcpu *d)
 {
-    struct pfn_info *page;
+    struct page_info *page;
     void    *vbase,*vcur;
     tlb_special_t  *ts;
     thash_cb_t  *tlb;
@@ -228,7 +228,7 @@
 void
 alloc_pmt(struct domain *d)
 {
-    struct pfn_info *page;
+    struct page_info *page;
 
     /* Only called once */
     ASSERT(d->arch.pmt);
@@ -392,7 +392,7 @@
         if ( tlb == NULL ) panic("No entry found in ITLB and DTLB\n");
         gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
     }
-    mfn = __gpfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
+    mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
     if ( mfn == INVALID_MFN ) return 0;
  
     mpa = (gpip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT);
@@ -789,7 +789,7 @@
     gpfn =PAGEALIGN(gpfn,(entry->ps-PAGE_SHIFT));
     gpfn = gpfn | POFFSET(va>>PAGE_SHIFT,(entry->ps-PAGE_SHIFT)); 
 
-    mpfn = __gpfn_to_mfn(v->domain, gpfn);
+    mpfn = gmfn_to_mfn(v->domain, gpfn);
     m = (mpfn<<PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
     /* machine address may be not continuous */
     end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Wed Feb  1 15:01:04 2006
+++ b/xen/arch/ia64/vmx/vmx_init.c      Wed Feb  1 15:28:50 2006
@@ -307,7 +307,7 @@
 {
        unsigned int order;
        unsigned long i, j, start, end, pgnr, conf_nr;
-       struct pfn_info *page;
+       struct page_info *page;
        struct vcpu *v = d->vcpu[0];
 
        ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
@@ -329,7 +329,7 @@
        }
 
        /* Map normal memory below 3G */
-       pgnr = page_to_pfn(page);
+       pgnr = page_to_mfn(page);
        end = conf_nr << PAGE_SHIFT;
        for (i = 0;
             i < (end < MMIO_START ? end : MMIO_START);
@@ -354,7 +354,7 @@
        }
 
        /* Map guest firmware */
-       pgnr = page_to_pfn(page);
+       pgnr = page_to_mfn(page);
        for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
            map_domain_page(d, i, pgnr << PAGE_SHIFT);
 
@@ -364,7 +364,7 @@
        }
 
        /* Map for shared I/O page and xenstore */
-       pgnr = page_to_pfn(page);
+       pgnr = page_to_mfn(page);
        map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
        pgnr++;
        map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Wed Feb  1 15:01:04 2006
+++ b/xen/arch/ia64/vmx/vtlb.c  Wed Feb  1 15:28:50 2006
@@ -988,7 +988,7 @@
 
 void check_vtlb_sanity(thash_cb_t *vtlb)
 {
-//    struct pfn_info *page;
+//    struct page_info *page;
     u64  hash_num, i, psr;
     static u64 check_ok_num, check_fail_num,check_invalid;
 //  void *vb1, *vb2;
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Wed Feb  1 15:01:04 2006
+++ b/xen/arch/ia64/xen/dom0_ops.c      Wed Feb  1 15:28:50 2006
@@ -29,7 +29,7 @@
     {
     case DOM0_GETPAGEFRAMEINFO:
     {
-        struct pfn_info *page;
+        struct page_info *page;
         unsigned long pfn = op->u.getpageframeinfo.pfn;
         domid_t dom = op->u.getpageframeinfo.domain;
         struct domain *d;
@@ -102,7 +102,7 @@
      
             for( j = 0; j < k; j++ )
             {      
-                struct pfn_info *page;
+                struct page_info *page;
                 unsigned long mfn = l_arr[j];
 
                 if ( unlikely(mfn >= max_page) )
@@ -177,7 +177,7 @@
 
             for ( i = start_page; i < (start_page + nr_pages); i++ )
             {
-               pfn = __gpfn_to_mfn_foreign(d, i);
+               pfn = gmfn_to_mfn_foreign(d, i);
 
                 if ( put_user(pfn, buffer) )
                 {
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/ia64/xen/dom_fw.c
--- a/xen/arch/ia64/xen/dom_fw.c        Wed Feb  1 15:01:04 2006
+++ b/xen/arch/ia64/xen/dom_fw.c        Wed Feb  1 15:28:50 2006
@@ -807,7 +807,7 @@
                /*
                 * This is a bad hack.  Dom0 may share other domains' memory
                 * through a dom0 physical address.  Unfortunately, this
-                * address may be used in phys_to_page (e.g. in the loopback
+                * address may be used in maddr_to_page (e.g. in the loopback
                 * driver) but when Linux initializes memory it only creates
                 * page structs for the physical memory it knows about.  And
                 * on ia64, only for full writeback granules.  So, we reserve
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Wed Feb  1 15:01:04 2006
+++ b/xen/arch/ia64/xen/domain.c        Wed Feb  1 15:28:50 2006
@@ -385,7 +385,7 @@
 printk("map_new_domain0_page: 
start=%p,end=%p!\n",dom0_start,dom0_start+dom0_size);
                while(1);
        }
-       return pfn_to_page((mpaddr >> PAGE_SHIFT));
+       return mfn_to_page((mpaddr >> PAGE_SHIFT));
 }
 
 /* allocate new page for domain and map it to the specified metaphysical addr */
@@ -425,16 +425,16 @@
                {
                        p = alloc_domheap_page(d);
                        // zero out pages for security reasons
-                       if (p) memset(__va(page_to_phys(p)),0,PAGE_SIZE);
+                       if (p) memset(__va(page_to_maddr(p)),0,PAGE_SIZE);
                }
                if (unlikely(!p)) {
 printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
                        return(p);
                }
-if (unlikely(page_to_phys(p) > vhpt_paddr && page_to_phys(p) < vhpt_pend)) {
-  printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_phys(p));
-}
-               set_pte(pte, pfn_pte(page_to_phys(p) >> PAGE_SHIFT,
+if (unlikely(page_to_maddr(p) > vhpt_paddr && page_to_maddr(p) < vhpt_pend)) {
+  printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
+}
+               set_pte(pte, pfn_pte(page_to_maddr(p) >> PAGE_SHIFT,
                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
        }
        else printk("map_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
@@ -662,7 +662,7 @@
 #else
                p = map_new_domain_page(d,dom_mpaddr);
                if (unlikely(!p)) BUG();
-               dom_imva = __va(page_to_phys(p));
+               dom_imva = __va(page_to_maddr(p));
 #endif
                if (filesz > 0) {
                        if (filesz >= PAGE_SIZE)
@@ -778,7 +778,7 @@
        unsigned long nr_pt_pages;
        unsigned long count;
        unsigned long alloc_start, alloc_end;
-       struct pfn_info *page = NULL;
+       struct page_info *page = NULL;
        start_info_t *si;
        struct vcpu *v = d->vcpu[0];
 
@@ -915,7 +915,7 @@
        si->nr_pages     = d->tot_pages;
 
 #if 0
-       si->shared_info  = virt_to_phys(d->shared_info);
+       si->shared_info  = virt_to_maddr(d->shared_info);
        si->flags        = SIF_PRIVILEGED | SIF_INITDOMAIN;
        //si->pt_base      = vpt_start;
        //si->nr_pt_frames = nr_pt_pages;
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/ia64/xen/mm_init.c
--- a/xen/arch/ia64/xen/mm_init.c       Wed Feb  1 15:01:04 2006
+++ b/xen/arch/ia64/xen/mm_init.c       Wed Feb  1 15:28:50 2006
@@ -446,7 +446,7 @@
 
        if (map_start < map_end)
                memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
-                                args->nid, args->zone, page_to_pfn(map_start));
+                                args->nid, args->zone, page_to_mfn(map_start));
        return 0;
 }
 
@@ -469,16 +469,16 @@
 }
 
 int
-ia64_pfn_valid (unsigned long pfn)
+ia64_mfn_valid (unsigned long pfn)
 {
        char byte;
-       struct page *pg = pfn_to_page(pfn);
+       struct page *pg = mfn_to_page(pfn);
 
        return     (__get_user(byte, (char *) pg) == 0)
                && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                        || (__get_user(byte, (char *) (pg + 1) - 1) == 0));
 }
-EXPORT_SYMBOL(ia64_pfn_valid);
+EXPORT_SYMBOL(ia64_mfn_valid);
 
 int
 find_largest_hole (u64 start, u64 end, void *arg)
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/ia64/xen/xenmem.c
--- a/xen/arch/ia64/xen/xenmem.c        Wed Feb  1 15:01:04 2006
+++ b/xen/arch/ia64/xen/xenmem.c        Wed Feb  1 15:28:50 2006
@@ -14,7 +14,7 @@
 #include <xen/mm.h>
 
 extern struct page *zero_page_memmap_ptr;
-struct pfn_info *frame_table;
+struct page_info *frame_table;
 unsigned long frame_table_size;
 unsigned long max_page;
 
@@ -34,7 +34,7 @@
 void
 paging_init (void)
 {
-       struct pfn_info *pg;
+       struct page_info *pg;
        unsigned int mpt_order;
        /* Create machine to physical mapping table
         * NOTE: similar to frame table, later we may need virtually
@@ -62,7 +62,7 @@
 void __init init_frametable(void)
 {
        unsigned long i, pfn;
-       frame_table_size = max_page * sizeof(struct pfn_info);
+       frame_table_size = max_page * sizeof(struct page_info);
        frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
 
        /* Request continuous trunk from boot allocator, since HV
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c       Wed Feb  1 15:01:04 2006
+++ b/xen/arch/ia64/xen/xenmisc.c       Wed Feb  1 15:28:50 2006
@@ -80,14 +80,14 @@
 }
 
 unsigned long
-__gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
+gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
 {
        if (d == dom0)
                return(gpfn);
        else {
                unsigned long pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT);
                if (!pte) {
-printk("__gpfn_to_mfn_foreign: bad gpfn. spinning...\n");
+printk("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
 while(1);
                        return 0;
                }
@@ -96,11 +96,11 @@
 }
 #if 0
 u32
-__mfn_to_gpfn(struct domain *d, unsigned long frame)
+mfn_to_gmfn(struct domain *d, unsigned long frame)
 {
        // FIXME: is this right?
 if ((frame << PAGE_SHIFT) & _PAGE_PPN_MASK) {
-printk("__mfn_to_gpfn: bad frame. spinning...\n");
+printk("mfn_to_gmfn: bad frame. spinning...\n");
 while(1);
 }
        return frame;
@@ -142,7 +142,7 @@
 }
 
 #if 0
-void free_page_type(struct pfn_info *page, unsigned int type)
+void free_page_type(struct page_info *page, unsigned int type)
 {
        dummy();
 }
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c      Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/audit.c      Wed Feb  1 15:28:50 2006
@@ -61,7 +61,7 @@
 #ifdef __i386__
 #ifdef CONFIG_X86_PAE
         /* 32b PAE */
-        if ( (( pfn_to_page(mfn)->u.inuse.type_info & PGT_va_mask ) 
+        if ( (( mfn_to_page(mfn)->u.inuse.type_info & PGT_va_mask ) 
            >> PGT_va_shift) == 3 )
             return l2_table_offset(HYPERVISOR_VIRT_START); 
         else
@@ -76,7 +76,7 @@
 #endif
     }
 
-    void _adjust(struct pfn_info *page, int adjtype ADJUST_EXTRA_ARGS)
+    void _adjust(struct page_info *page, int adjtype ADJUST_EXTRA_ARGS)
     {
         int count;
 
@@ -90,7 +90,7 @@
             if ( page_get_owner(page) == NULL )
             {
                 APRINTK("adjust(mfn=%lx, dir=%d, adjtype=%d) owner=NULL",
-                        page_to_pfn(page), dir, adjtype);
+                        page_to_mfn(page), dir, adjtype);
                 errors++;
             }
 
@@ -98,7 +98,7 @@
             {
                 APRINTK("Audit %d: type count went below zero "
                         "mfn=%lx t=%" PRtype_info " ot=%x",
-                        d->domain_id, page_to_pfn(page),
+                        d->domain_id, page_to_mfn(page),
                         page->u.inuse.type_info,
                         page->tlbflush_timestamp);
                 errors++;
@@ -107,7 +107,7 @@
             {
                 APRINTK("Audit %d: type count overflowed "
                         "mfn=%lx t=%" PRtype_info " ot=%x",
-                        d->domain_id, page_to_pfn(page),
+                        d->domain_id, page_to_mfn(page),
                         page->u.inuse.type_info,
                         page->tlbflush_timestamp);
                 errors++;
@@ -124,7 +124,7 @@
         {
             APRINTK("Audit %d: general count went below zero "
                     "mfn=%lx t=%" PRtype_info " ot=%x",
-                    d->domain_id, page_to_pfn(page),
+                    d->domain_id, page_to_mfn(page),
                     page->u.inuse.type_info,
                     page->tlbflush_timestamp);
             errors++;
@@ -133,7 +133,7 @@
         {
             APRINTK("Audit %d: general count overflowed "
                     "mfn=%lx t=%" PRtype_info " ot=%x",
-                    d->domain_id, page_to_pfn(page),
+                    d->domain_id, page_to_mfn(page),
                     page->u.inuse.type_info,
                     page->tlbflush_timestamp);
             errors++;
@@ -153,7 +153,7 @@
             if ( l2e_get_flags(pt[i]) & _PAGE_PRESENT )
             {
                unsigned long l1mfn = l2e_get_pfn(pt[i]);
-                struct pfn_info *l1page = pfn_to_page(l1mfn);
+                struct page_info *l1page = mfn_to_page(l1mfn);
 
                 if ( noisy )
                 {
@@ -223,7 +223,7 @@
         {
             unsigned long hl2mfn =
                 l2e_get_pfn(pt[l2_table_offset(LINEAR_PT_VIRT_START)]);
-            struct pfn_info *hl2page = pfn_to_page(hl2mfn);
+            struct page_info *hl2page = mfn_to_page(hl2mfn);
             adjust(hl2page, 0);
         }
 
@@ -240,7 +240,7 @@
             if ( l2e_get_flags(pt[i]) & _PAGE_PRESENT )
             {
                 unsigned long gmfn = l2e_get_pfn(pt[i]);
-                struct pfn_info *gpage = pfn_to_page(gmfn);
+                struct page_info *gpage = mfn_to_page(gmfn);
 
                 if ( gmfn < 0x100 )
                 {
@@ -287,7 +287,7 @@
             if ( l1e_get_flags(pt[i]) & _PAGE_PRESENT )
             {
                 unsigned long gmfn = l1e_get_pfn(pt[i]);
-                struct pfn_info *gpage = pfn_to_page(gmfn);
+                struct page_info *gpage = mfn_to_page(gmfn);
 
                 if ( gmfn < 0x100 )
                 {
@@ -354,7 +354,7 @@
     {
         struct shadow_status *a;
         unsigned long smfn, gmfn;
-        struct pfn_info *page;
+        struct page_info *page;
         int i;
 
         for ( i = 0; i < shadow_ht_buckets; i++ )
@@ -362,32 +362,32 @@
             a = &d->arch.shadow_ht[i];
             while ( a && a->gpfn_and_flags )
             {
-                gmfn = __gpfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
+                gmfn = gmfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
                 smfn = a->smfn;
-                page = pfn_to_page(smfn);
+                page = mfn_to_page(smfn);
 
                 switch ( a->gpfn_and_flags & PGT_type_mask ) {
                 case PGT_writable_pred:
                     break;
                 case PGT_snapshot:
-                    adjust(pfn_to_page(gmfn), 0);
+                    adjust(mfn_to_page(gmfn), 0);
                     break;
                 case PGT_l1_shadow:
-                    adjust(pfn_to_page(gmfn), 0);
+                    adjust(mfn_to_page(gmfn), 0);
                     if ( shadow_refcounts )
                         adjust_l1_page(smfn);
                     if ( page->u.inuse.type_info & PGT_pinned )
                         adjust(page, 0);
                     break;
                 case PGT_hl2_shadow:
-                    adjust(pfn_to_page(gmfn), 0);
+                    adjust(mfn_to_page(gmfn), 0);
                     if ( shadow_refcounts )
                         adjust_hl2_page(smfn);
                     if ( page->u.inuse.type_info & PGT_pinned )
                         adjust(page, 0);
                     break;
                 case PGT_l2_shadow:
-                    adjust(pfn_to_page(gmfn), 0);
+                    adjust(mfn_to_page(gmfn), 0);
                     adjust_l2_page(smfn, 1);
                     if ( page->u.inuse.type_info & PGT_pinned )
                         adjust(page, 0);
@@ -411,15 +411,15 @@
 
         while ( oos )
         {
-            adjust(pfn_to_page(oos->gmfn), 0);
+            adjust(mfn_to_page(oos->gmfn), 0);
 
             // Only use entries that have low bits clear...
             //
             if ( !(oos->writable_pl1e & (sizeof(l1_pgentry_t)-1)) )
-                adjust(pfn_to_page(oos->writable_pl1e >> PAGE_SHIFT), 0);
+                adjust(mfn_to_page(oos->writable_pl1e >> PAGE_SHIFT), 0);
 
             if ( oos->snapshot_mfn != SHADOW_SNAPSHOT_ELSEWHERE )
-                adjust(pfn_to_page(oos->snapshot_mfn), 0);
+                adjust(mfn_to_page(oos->snapshot_mfn), 0);
 
             oos = oos->next;
             oos_count++;
@@ -433,28 +433,28 @@
         for_each_vcpu(d, v)
         {
             if ( pagetable_get_paddr(v->arch.guest_table) )
-                adjust(pfn_to_page(pagetable_get_pfn(v->arch.guest_table)),
+                adjust(mfn_to_page(pagetable_get_pfn(v->arch.guest_table)),
                        !shadow_mode_refcounts(d));
             if ( pagetable_get_paddr(v->arch.shadow_table) )
-                adjust(pfn_to_page(pagetable_get_pfn(v->arch.shadow_table)),
+                adjust(mfn_to_page(pagetable_get_pfn(v->arch.shadow_table)),
                        0);
             if ( v->arch.monitor_shadow_ref )
-                adjust(pfn_to_page(v->arch.monitor_shadow_ref), 0);
+                adjust(mfn_to_page(v->arch.monitor_shadow_ref), 0);
         }
     }
 
     void adjust_guest_pages()
     {
         struct list_head *list_ent = d->page_list.next;
-        struct pfn_info *page;
+        struct page_info *page;
         unsigned long mfn, snapshot_mfn;
 
         while ( list_ent != &d->page_list )
         {
             u32 page_type;
 
-            page = list_entry(list_ent, struct pfn_info, list);
-            snapshot_mfn = mfn = page_to_pfn(page);
+            page = list_entry(list_ent, struct page_info, list);
+            snapshot_mfn = mfn = page_to_mfn(page);
             page_type = page->u.inuse.type_info & PGT_type_mask;
 
             BUG_ON(page_get_owner(page) != d);
@@ -464,7 +464,7 @@
             if ( shadow_enabled && !shadow_refcounts &&
                  page_out_of_sync(page) )
             {
-                unsigned long gpfn = __mfn_to_gpfn(d, mfn);
+                unsigned long gpfn = mfn_to_gmfn(d, mfn);
                 ASSERT( VALID_M2P(gpfn) );
                 snapshot_mfn = __shadow_status(d, gpfn, PGT_snapshot);
                 ASSERT( snapshot_mfn );
@@ -619,7 +619,7 @@
     void scan_for_pfn_in_mfn(struct domain *d, unsigned long xmfn,
                              unsigned long mfn)
     {
-        struct pfn_info *page = pfn_to_page(mfn);
+        struct page_info *page = mfn_to_page(mfn);
         l1_pgentry_t *pt = map_domain_page(mfn);
         int i;
 
@@ -662,17 +662,17 @@
         if ( !shadow_mode_enabled(d) )
         {
             struct list_head *list_ent = d->page_list.next;
-            struct pfn_info *page;
+            struct page_info *page;
 
             while ( list_ent != &d->page_list )
             {
-                page = list_entry(list_ent, struct pfn_info, list);
+                page = list_entry(list_ent, struct page_info, list);
 
                 switch ( page->u.inuse.type_info & PGT_type_mask )
                 {
                 case PGT_l1_page_table:
                 case PGT_l2_page_table:
-                    scan_for_pfn_in_mfn(d, xmfn, page_to_pfn(page));
+                    scan_for_pfn_in_mfn(d, xmfn, page_to_mfn(page));
                     break;
                 default:
                     break;
@@ -720,7 +720,7 @@
 
     unsigned long mfn;
     struct list_head *list_ent;
-    struct pfn_info *page;
+    struct page_info *page;
     int errors = 0;
 
     if ( (d != current->domain) && shadow_mode_translate(d) )
@@ -751,8 +751,8 @@
         u32 page_type;
         unsigned long pfn;
 
-        page = list_entry(list_ent, struct pfn_info, list);
-        mfn = page_to_pfn(page);
+        page = list_entry(list_ent, struct page_info, list);
+        mfn = page_to_mfn(page);
         page_type = page->u.inuse.type_info & PGT_type_mask;
 
         BUG_ON(page_get_owner(page) != d);
@@ -806,7 +806,7 @@
                 printk("out of sync page mfn=%lx is not a page table\n", mfn);
                 errors++;
             }
-            pfn = __mfn_to_gpfn(d, mfn);
+            pfn = mfn_to_gmfn(d, mfn);
             if ( !__shadow_status(d, pfn, PGT_snapshot) )
             {
                 printk("out of sync page mfn=%lx doesn't have a snapshot\n",
@@ -845,8 +845,8 @@
     list_ent = d->page_list.next;
     while ( list_ent != &d->page_list )
     {
-        page = list_entry(list_ent, struct pfn_info, list);
-        mfn = page_to_pfn(page);
+        page = list_entry(list_ent, struct page_info, list);
+        mfn = page_to_mfn(page);
 
         switch ( page->u.inuse.type_info & PGT_type_mask)
         {
@@ -898,7 +898,7 @@
     if ( shadow_mode_enabled(d) )
     {
         struct shadow_status *a;
-        struct pfn_info *page;
+        struct page_info *page;
         u32 page_type;
         int i;
 
@@ -907,7 +907,7 @@
             a = &d->arch.shadow_ht[i];
             while ( a && a->gpfn_and_flags )
             {
-                page = pfn_to_page(a->smfn);
+                page = mfn_to_page(a->smfn);
                 page_type = a->gpfn_and_flags & PGT_type_mask;
 
                 switch ( page_type ) {
@@ -920,7 +920,7 @@
                     {
                         printk("Audit %d: shadow page counts wrong "
                                "mfn=%lx t=%" PRtype_info " c=%08x\n",
-                               d->domain_id, page_to_pfn(page),
+                               d->domain_id, page_to_mfn(page),
                                page->u.inuse.type_info,
                                page->count_info);
                         printk("a->gpfn_and_flags=%p\n",
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/dom0_ops.c
--- a/xen/arch/x86/dom0_ops.c   Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/dom0_ops.c   Wed Feb  1 15:28:50 2006
@@ -199,7 +199,7 @@
     
     case DOM0_GETPAGEFRAMEINFO:
     {
-        struct pfn_info *page;
+        struct page_info *page;
         unsigned long pfn = op->u.getpageframeinfo.pfn;
         domid_t dom = op->u.getpageframeinfo.domain;
         struct domain *d;
@@ -210,7 +210,7 @@
              unlikely((d = find_domain_by_id(dom)) == NULL) )
             break;
 
-        page = pfn_to_page(pfn);
+        page = mfn_to_page(pfn);
 
         if ( likely(get_page(page, d)) )
         {
@@ -282,12 +282,12 @@
      
             for( j = 0; j < k; j++ )
             {      
-                struct pfn_info *page;
+                struct page_info *page;
                 unsigned long mfn = l_arr[j];
 
-                page = pfn_to_page(mfn);
-
-                if ( likely(pfn_valid(mfn) && get_page(page, d)) ) 
+                page = mfn_to_page(mfn);
+
+                if ( likely(mfn_valid(mfn) && get_page(page, d)) ) 
                 {
                     unsigned long type = 0;
 
@@ -350,14 +350,14 @@
             list_ent = d->page_list.next;
             for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
             {
-                pfn = page_to_pfn(list_entry(list_ent, struct pfn_info, list));
+                pfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
                 if ( put_user(pfn, buffer) )
                 {
                     ret = -EFAULT;
                     break;
                 }
                 buffer++;
-                list_ent = pfn_to_page(pfn)->list.next;
+                list_ent = mfn_to_page(pfn)->list.next;
             }
             spin_unlock(&d->page_alloc_lock);
 
@@ -420,8 +420,8 @@
             break;
 
         ret = -EACCES;
-        if ( !pfn_valid(mfn) ||
-             !get_page_and_type(pfn_to_page(mfn), d, PGT_writable_page) )
+        if ( !mfn_valid(mfn) ||
+             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
         {
             put_domain(d);
             break;
@@ -433,7 +433,7 @@
         hypercall_page_initialise(hypercall_page);
         unmap_domain_page(hypercall_page);
 
-        put_page_and_type(pfn_to_page(mfn));
+        put_page_and_type(mfn_to_page(mfn));
 
         put_domain(d);
     }
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/domain.c     Wed Feb  1 15:28:50 2006
@@ -179,7 +179,7 @@
 
 void dump_pageframe_info(struct domain *d)
 {
-    struct pfn_info *page;
+    struct page_info *page;
 
     printk("Memory pages belonging to domain %u:\n", d->domain_id);
 
@@ -192,7 +192,7 @@
         list_for_each_entry ( page, &d->page_list, list )
         {
             printk("    DomPage %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
-                   _p(page_to_phys(page)), _p(page_to_pfn(page)),
+                   _p(page_to_maddr(page)), _p(page_to_mfn(page)),
                    page->count_info, page->u.inuse.type_info);
         }
     }
@@ -200,7 +200,7 @@
     list_for_each_entry ( page, &d->xenpage_list, list )
     {
         printk("    XenPage %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
-               _p(page_to_phys(page)), _p(page_to_pfn(page)),
+               _p(page_to_maddr(page)), _p(page_to_mfn(page)),
                page->count_info, page->u.inuse.type_info);
     }
 }
@@ -400,7 +400,7 @@
 
     phys_basetab = c->ctrlreg[3];
     phys_basetab =
-        (__gpfn_to_mfn(d, phys_basetab >> PAGE_SHIFT) << PAGE_SHIFT) |
+        (gmfn_to_mfn(d, phys_basetab >> PAGE_SHIFT) << PAGE_SHIFT) |
         (phys_basetab & ~PAGE_MASK);
 
     v->arch.guest_table = mk_pagetable(phys_basetab);
@@ -410,7 +410,7 @@
 
     if ( shadow_mode_refcounts(d) )
     {
-        if ( !get_page(pfn_to_page(phys_basetab>>PAGE_SHIFT), d) )
+        if ( !get_page(mfn_to_page(phys_basetab>>PAGE_SHIFT), d) )
         {
             destroy_gdt(v);
             return -EINVAL;
@@ -418,7 +418,7 @@
     }
     else if ( !(c->flags & VGCF_HVM_GUEST) )
     {
-        if ( !get_page_and_type(pfn_to_page(phys_basetab>>PAGE_SHIFT), d,
+        if ( !get_page_and_type(mfn_to_page(phys_basetab>>PAGE_SHIFT), d,
                                 PGT_base_page_table) )
         {
             destroy_gdt(v);
@@ -879,7 +879,7 @@
 static void relinquish_memory(struct domain *d, struct list_head *list)
 {
     struct list_head *ent;
-    struct pfn_info  *page;
+    struct page_info  *page;
     unsigned long     x, y;
 
     /* Use a recursive lock, as we may enter 'free_domheap_page'. */
@@ -888,7 +888,7 @@
     ent = list->next;
     while ( ent != list )
     {
-        page = list_entry(ent, struct pfn_info, list);
+        page = list_entry(ent, struct page_info, list);
 
         /* Grab a reference to the page so it won't disappear from under us. */
         if ( unlikely(!get_page(page, d)) )
@@ -949,8 +949,8 @@
         if ( (pfn = pagetable_get_pfn(v->arch.guest_table)) != 0 )
         {
             if ( !shadow_mode_refcounts(d) )
-                put_page_type(pfn_to_page(pfn));
-            put_page(pfn_to_page(pfn));
+                put_page_type(mfn_to_page(pfn));
+            put_page(mfn_to_page(pfn));
 
             v->arch.guest_table = mk_pagetable(0);
         }
@@ -958,8 +958,8 @@
         if ( (pfn = pagetable_get_pfn(v->arch.guest_table_user)) != 0 )
         {
             if ( !shadow_mode_refcounts(d) )
-                put_page_type(pfn_to_page(pfn));
-            put_page(pfn_to_page(pfn));
+                put_page_type(mfn_to_page(pfn));
+            put_page(mfn_to_page(pfn));
 
             v->arch.guest_table_user = mk_pagetable(0);
         }
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/domain_build.c       Wed Feb  1 15:28:50 2006
@@ -75,9 +75,9 @@
 #define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
 #define round_pgdown(_p)  ((_p)&PAGE_MASK)
 
-static struct pfn_info *alloc_chunk(struct domain *d, unsigned long max_pages)
+static struct page_info *alloc_chunk(struct domain *d, unsigned long max_pages)
 {
-    struct pfn_info *page;
+    struct page_info *page;
     unsigned int order;
     /*
      * Allocate up to 2MB at a time: It prevents allocating very large chunks
@@ -143,7 +143,7 @@
     unsigned long alloc_spfn;
     unsigned long alloc_epfn;
     unsigned long count;
-    struct pfn_info *page = NULL;
+    struct page_info *page = NULL;
     start_info_t *si;
     struct vcpu *v = d->vcpu[0];
     char *p;
@@ -299,12 +299,12 @@
     /* Allocate from DMA pool: PAE L3 table must be below 4GB boundary. */
     if ( (page = alloc_domheap_pages(d, order, ALLOC_DOM_DMA)) == NULL )
         panic("Not enough RAM for domain 0 allocation.\n");
-    alloc_spfn = page_to_pfn(page);
+    alloc_spfn = page_to_mfn(page);
     alloc_epfn = alloc_spfn + d->tot_pages;
 
     printk("PHYSICAL MEMORY ARRANGEMENT:\n"
-           " Dom0 alloc.:   %"PRIphysaddr"->%"PRIphysaddr,
-           pfn_to_phys(alloc_spfn), pfn_to_phys(alloc_epfn));
+           " Dom0 alloc.:   %"PRIpaddr"->%"PRIpaddr,
+           pfn_to_paddr(alloc_spfn), pfn_to_paddr(alloc_epfn));
     if ( d->tot_pages < nr_pages )
         printk(" (%lu pages to be allocated)",
                nr_pages - d->tot_pages);
@@ -334,7 +334,7 @@
     }
 
     mpt_alloc = (vpt_start - dsi.v_start) + 
-        (unsigned long)pfn_to_phys(alloc_spfn);
+        (unsigned long)pfn_to_paddr(alloc_spfn);
 
     /*
      * We're basically forcing default RPLs to 1, so that our "what privilege
@@ -400,7 +400,7 @@
         *l1tab = l1e_from_pfn(mfn, L1_PROT);
         l1tab++;
         
-        page = pfn_to_page(mfn);
+        page = mfn_to_page(mfn);
         if ( !get_page_and_type(page, d, PGT_writable_page) )
             BUG();
 
@@ -413,7 +413,7 @@
     l1tab += l1_table_offset(vpt_start);
     for ( count = 0; count < nr_pt_pages; count++ ) 
     {
-        page = pfn_to_page(l1e_get_pfn(*l1tab));
+        page = mfn_to_page(l1e_get_pfn(*l1tab));
         if ( !opt_dom0_shadow )
             l1e_remove_flags(*l1tab, _PAGE_RW);
         else
@@ -496,7 +496,7 @@
     }
 
     /* WARNING: The new domain must have its 'processor' field filled in! */
-    phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
+    maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
     l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
     memcpy(l4tab, &idle_pg_table[0], PAGE_SIZE);
     l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
@@ -511,21 +511,21 @@
     {
         if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
         {
-            phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l1_page_table;
+            maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l1_page_table;
             l1start = l1tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
             clear_page(l1tab);
             if ( count == 0 )
                 l1tab += l1_table_offset(dsi.v_start);
             if ( !((unsigned long)l2tab & (PAGE_SIZE-1)) )
             {
-                phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
+                maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
                 l2start = l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
                 clear_page(l2tab);
                 if ( count == 0 )
                     l2tab += l2_table_offset(dsi.v_start);
                 if ( !((unsigned long)l3tab & (PAGE_SIZE-1)) )
                 {
-                    phys_to_page(mpt_alloc)->u.inuse.type_info =
+                    maddr_to_page(mpt_alloc)->u.inuse.type_info =
                         PGT_l3_page_table;
                     l3start = l3tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
                     clear_page(l3tab);
@@ -543,7 +543,7 @@
         *l1tab = l1e_from_pfn(mfn, L1_PROT);
         l1tab++;
 
-        page = pfn_to_page(mfn);
+        page = mfn_to_page(mfn);
         if ( (page->u.inuse.type_info == 0) &&
              !get_page_and_type(page, d, PGT_writable_page) )
             BUG();
@@ -562,7 +562,7 @@
     for ( count = 0; count < nr_pt_pages; count++ ) 
     {
         l1e_remove_flags(*l1tab, _PAGE_RW);
-        page = pfn_to_page(l1e_get_pfn(*l1tab));
+        page = mfn_to_page(l1e_get_pfn(*l1tab));
 
         /* Read-only mapping + PGC_allocated + page-table page. */
         page->count_info         = PGC_allocated | 3;
@@ -640,11 +640,11 @@
     memset(si, 0, PAGE_SIZE);
     si->nr_pages = nr_pages;
 
-    si->shared_info = virt_to_phys(d->shared_info);
+    si->shared_info = virt_to_maddr(d->shared_info);
     if ( opt_dom0_translate )
     {
         si->shared_info  = max_page << PAGE_SHIFT;
-        set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT, max_page);
+        set_pfn_from_mfn(virt_to_maddr(d->shared_info) >> PAGE_SHIFT, max_page);
     }
 
     si->flags        = SIF_PRIVILEGED | SIF_INITDOMAIN;
@@ -672,7 +672,7 @@
             panic("Not enough RAM for DOM0 reservation.\n");
         while ( pfn < d->tot_pages )
         {
-            mfn = page_to_pfn(page);
+            mfn = page_to_mfn(page);
 #ifndef NDEBUG
 #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
 #endif
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/hvm/hvm.c    Wed Feb  1 15:28:50 2006
@@ -59,18 +59,18 @@
     unsigned char e820_map_nr;
     struct e820entry *e820entry;
     unsigned char *p;
-    unsigned long mpfn;
+    unsigned long mfn;
     unsigned long gpfn = 0;
 
     local_flush_tlb_pge();
 
-    mpfn = get_mfn_from_pfn(E820_MAP_PAGE >> PAGE_SHIFT);
-    if (mpfn == INVALID_MFN) {
+    mfn = get_mfn_from_pfn(E820_MAP_PAGE >> PAGE_SHIFT);
+    if (mfn == INVALID_MFN) {
         printk("Can not find E820 memory map page for HVM domain.\n");
         domain_crash_synchronous();
     }
 
-    p = map_domain_page(mpfn);
+    p = map_domain_page(mfn);
     if (p == NULL) {
         printk("Can not map E820 memory map page for HVM domain.\n");
         domain_crash_synchronous();
@@ -97,13 +97,13 @@
     unmap_domain_page(p);
 
     /* Initialise shared page */
-    mpfn = get_mfn_from_pfn(gpfn);
-    if (mpfn == INVALID_MFN) {
+    mfn = get_mfn_from_pfn(gpfn);
+    if (mfn == INVALID_MFN) {
         printk("Can not find io request shared page for HVM domain.\n");
         domain_crash_synchronous();
     }
 
-    p = map_domain_page_global(mpfn);
+    p = map_domain_page_global(mfn);
     if (p == NULL) {
         printk("Can not map io request shared page for HVM domain.\n");
         domain_crash_synchronous();
@@ -140,16 +140,16 @@
 static void hvm_get_info(struct domain *d)
 {
     unsigned char *p;
-    unsigned long mpfn;
+    unsigned long mfn;
     struct hvm_info_table *t;
 
-    mpfn = get_mfn_from_pfn(HVM_INFO_PFN);
-    if ( mpfn == INVALID_MFN ) {
+    mfn = get_mfn_from_pfn(HVM_INFO_PFN);
+    if ( mfn == INVALID_MFN ) {
         printk("Can not get info page mfn for HVM domain.\n");
         domain_crash_synchronous();
     }
 
-    p = map_domain_page(mpfn);
+    p = map_domain_page(mfn);
     if ( p == NULL ) {
         printk("Can not map info page for HVM domain.\n");
         domain_crash_synchronous();
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Feb  1 15:28:50 2006
@@ -1365,7 +1365,7 @@
         /* The guest CR3 must be pointing to the guest physical. */
         if (!VALID_MFN(mfn = 
                     get_mfn_from_pfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT))
-                || !get_page(pfn_to_page(mfn), v->domain))
+                || !get_page(mfn_to_page(mfn), v->domain))
         {
             printk("Invalid CR3 value = %lx\n", v->arch.hvm_svm.cpu_cr3);
             domain_crash_synchronous(); /* need to take a clean path */
@@ -1435,7 +1435,7 @@
             unsigned long old_base_mfn;
             old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
             if (old_base_mfn)
-                put_page(pfn_to_page(old_base_mfn));
+                put_page(mfn_to_page(old_base_mfn));
        }
 #endif
         /* Now arch.guest_table points to machine physical. */
@@ -1571,7 +1571,7 @@
             HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
             if (((value >> PAGE_SHIFT) > v->domain->max_pages) 
                     || !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT))
-                    || !get_page(pfn_to_page(mfn), v->domain))
+                    || !get_page(mfn_to_page(mfn), v->domain))
             {
                 printk("Invalid CR3 value=%lx\n", value);
                 domain_crash_synchronous(); /* need to take a clean path */
@@ -1581,7 +1581,7 @@
             v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
 
             if (old_base_mfn)
-                put_page(pfn_to_page(old_base_mfn));
+                put_page(mfn_to_page(old_base_mfn));
 
             update_pagetables(v);
             
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Wed Feb  1 15:28:50 2006
@@ -155,8 +155,8 @@
     arch_svm->iopm = iopm;
     arch_svm->msrpm = msrpm;
 
-    vmcb->iopm_base_pa = (u64) virt_to_phys(iopm);
-    vmcb->msrpm_base_pa = (u64) virt_to_phys(msrpm);
+    vmcb->iopm_base_pa = (u64) virt_to_maddr(iopm);
+    vmcb->msrpm_base_pa = (u64) virt_to_maddr(msrpm);
 
     return 0;
 }
@@ -361,11 +361,11 @@
         goto err_out;
     }
 
-    phys_hsa = (u64) virt_to_phys(hsa);
+    phys_hsa = (u64) virt_to_maddr(hsa);
     arch_svm->host_save_area = hsa;
     arch_svm->host_save_pa   = phys_hsa;
 
-    arch_svm->vmcb_pa  = (u64) virt_to_phys(arch_svm->vmcb);
+    arch_svm->vmcb_pa  = (u64) virt_to_maddr(arch_svm->vmcb);
 
     if ((error = load_vmcb(arch_svm, arch_svm->host_save_pa))) 
     {
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Wed Feb  1 15:28:50 2006
@@ -107,8 +107,8 @@
     clear_bit(PC_DEBUG_PORT, io_bitmap_a);
     memset(io_bitmap_b, 0xff, 0x1000);
 
-    error |= __vmwrite(IO_BITMAP_A, (u64) virt_to_phys(io_bitmap_a));
-    error |= __vmwrite(IO_BITMAP_B, (u64) virt_to_phys(io_bitmap_b));
+    error |= __vmwrite(IO_BITMAP_A, (u64) virt_to_maddr(io_bitmap_a));
+    error |= __vmwrite(IO_BITMAP_B, (u64) virt_to_maddr(io_bitmap_b));
 
     arch_vmx->io_bitmap_a = io_bitmap_a;
     arch_vmx->io_bitmap_b = io_bitmap_b;
@@ -405,7 +405,7 @@
         rc = -ENOMEM;
         goto err_out;
     }
-    vmcs_phys_ptr = (u64) virt_to_phys(arch_vmx->vmcs);
+    vmcs_phys_ptr = (u64) virt_to_maddr(arch_vmx->vmcs);
 
     if ((error = __vmpclear(vmcs_phys_ptr))) {
         printk("construct_vmcs: VMCLEAR failed\n");
@@ -474,9 +474,9 @@
 {
     int error;
     u64 vmcs_phys_ptr, old, old_phys_ptr;
-    vmcs_phys_ptr = (u64) virt_to_phys(arch_vmx->vmcs);
-
-    old_phys_ptr = virt_to_phys(&old);
+    vmcs_phys_ptr = (u64) virt_to_maddr(arch_vmx->vmcs);
+
+    old_phys_ptr = virt_to_maddr(&old);
     __vmptrst(old_phys_ptr);
     if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
         printk("modify_vmcs: load_vmcs failed: VMCS = %lx\n",
@@ -512,14 +512,14 @@
 {
     if ( v->arch.hvm_vmx.launch_cpu == smp_processor_id() )
     {
-        load_vmcs(&v->arch.hvm_vmx, virt_to_phys(v->arch.hvm_vmx.vmcs));
+        load_vmcs(&v->arch.hvm_vmx, virt_to_maddr(v->arch.hvm_vmx.vmcs));
         vmx_do_resume(v);
         reset_stack_and_jump(vmx_asm_do_resume);
     }
     else
     {
-        __vmpclear(virt_to_phys(v->arch.hvm_vmx.vmcs));
-        load_vmcs(&v->arch.hvm_vmx, virt_to_phys(v->arch.hvm_vmx.vmcs));
+        __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+        load_vmcs(&v->arch.hvm_vmx, virt_to_maddr(v->arch.hvm_vmx.vmcs));
         vmx_do_resume(v);
         vmx_set_host_env(v);
         v->arch.hvm_vmx.launch_cpu = smp_processor_id();
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Feb  1 15:28:50 2006
@@ -495,7 +495,7 @@
         return 0;
     }
 
-    phys_vmcs = (u64) virt_to_phys(vmcs);
+    phys_vmcs = (u64) virt_to_maddr(vmcs);
 
     if (!(__vmxon(phys_vmcs))) {
         printk("VMXON is done\n");
@@ -987,12 +987,12 @@
             return 0;
         }
         mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
-        if(!get_page(pfn_to_page(mfn), v->domain))
+        if(!get_page(mfn_to_page(mfn), v->domain))
                 return 0;
         old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
         v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
         if (old_base_mfn)
-             put_page(pfn_to_page(old_base_mfn));
+             put_page(mfn_to_page(old_base_mfn));
         update_pagetables(v);
         /*
          * arch.shadow_table should now hold the next CR3 for shadow
@@ -1159,7 +1159,7 @@
          */
         if ( !VALID_MFN(mfn = get_mfn_from_pfn(
             v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
-             !get_page(pfn_to_page(mfn), v->domain) )
+             !get_page(mfn_to_page(mfn), v->domain) )
         {
             printk("Invalid CR3 value = %lx", v->arch.hvm_vmx.cpu_cr3);
             domain_crash_synchronous(); /* need to take a clean path */
@@ -1232,7 +1232,7 @@
 
     if(!((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled)
         if(v->arch.hvm_vmx.cpu_cr3) {
-            put_page(pfn_to_page(get_mfn_from_pfn(
+            put_page(mfn_to_page(get_mfn_from_pfn(
                       v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
             v->arch.guest_table = mk_pagetable(0);
         }
@@ -1378,7 +1378,7 @@
             HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
             if ( ((value >> PAGE_SHIFT) > v->domain->max_pages ) ||
                  !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) ||
-                 !get_page(pfn_to_page(mfn), v->domain) )
+                 !get_page(mfn_to_page(mfn), v->domain) )
             {
                 printk("Invalid CR3 value=%lx", value);
                 domain_crash_synchronous(); /* need to take a clean path */
@@ -1386,7 +1386,7 @@
             old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
             v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
             if (old_base_mfn)
-                put_page(pfn_to_page(old_base_mfn));
+                put_page(mfn_to_page(old_base_mfn));
             update_pagetables(v);
             /*
              * arch.shadow_table should now hold the next CR3 for shadow
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/mm.c Wed Feb  1 15:28:50 2006
@@ -121,8 +121,8 @@
  */
 #define MMU_UPDATE_PREEMPTED          (~(~0U>>1))
 
-static void free_l2_table(struct pfn_info *page);
-static void free_l1_table(struct pfn_info *page);
+static void free_l2_table(struct page_info *page);
+static void free_l1_table(struct page_info *page);
 
 static int mod_l2_entry(l2_pgentry_t *, l2_pgentry_t, unsigned long,
                         unsigned long type);
@@ -148,27 +148,27 @@
 static struct domain *dom_xen, *dom_io;
 
 /* Frame table and its size in pages. */
-struct pfn_info *frame_table;
+struct page_info *frame_table;
 unsigned long max_page;
 unsigned long total_pages;
 
 void __init init_frametable(void)
 {
-    unsigned long nr_pages, page_step, i, pfn;
-
-    frame_table = (struct pfn_info *)FRAMETABLE_VIRT_START;
+    unsigned long nr_pages, page_step, i, mfn;
+
+    frame_table = (struct page_info *)FRAMETABLE_VIRT_START;
 
     nr_pages  = PFN_UP(max_page * sizeof(*frame_table));
     page_step = (1 << L2_PAGETABLE_SHIFT) >> PAGE_SHIFT;
 
     for ( i = 0; i < nr_pages; i += page_step )
     {
-        pfn = alloc_boot_pages(min(nr_pages - i, page_step), page_step);
-        if ( pfn == 0 )
+        mfn = alloc_boot_pages(min(nr_pages - i, page_step), page_step);
+        if ( mfn == 0 )
             panic("Not enough memory for frame table\n");
         map_pages_to_xen(
             FRAMETABLE_VIRT_START + (i << PAGE_SHIFT),
-            pfn, page_step, PAGE_HYPERVISOR);
+            mfn, page_step, PAGE_HYPERVISOR);
     }
 
     memset(frame_table, 0, nr_pages << PAGE_SHIFT);
@@ -179,7 +179,7 @@
     extern void subarch_init_memory(struct domain *);
 
     unsigned long i, pfn, rstart_pfn, rend_pfn;
-    struct pfn_info *page;
+    struct page_info *page;
 
     memset(percpu_info, 0, sizeof(percpu_info));
 
@@ -194,7 +194,7 @@
 
     /*
      * Initialise our DOMID_IO domain.
-     * This domain owns I/O pages that are within the range of the pfn_info
+     * This domain owns I/O pages that are within the range of the page_info
      * array. Mappings occur at the priv of the caller.
      */
     dom_io = alloc_domain();
@@ -204,7 +204,7 @@
     /* First 1MB of RAM is historically marked as I/O. */
     for ( i = 0; i < 0x100; i++ )
     {
-        page = pfn_to_page(i);
+        page = mfn_to_page(i);
         page->count_info        = PGC_allocated | 1;
         page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
         page_set_owner(page, dom_io);
@@ -220,8 +220,8 @@
         rend_pfn   = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
         for ( ; pfn < rstart_pfn; pfn++ )
         {
-            BUG_ON(!pfn_valid(pfn));
-            page = pfn_to_page(pfn);
+            BUG_ON(!mfn_valid(pfn));
+            page = mfn_to_page(pfn);
             page->count_info        = PGC_allocated | 1;
             page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
             page_set_owner(page, dom_io);
@@ -243,7 +243,7 @@
 {
     int i;
     unsigned long pfn;
-    struct pfn_info *page;
+    struct page_info *page;
     
     if ( v->arch.shadow_ldt_mapcnt == 0 )
         return;
@@ -255,7 +255,7 @@
         pfn = l1e_get_pfn(v->arch.perdomain_ptes[i]);
         if ( pfn == 0 ) continue;
         v->arch.perdomain_ptes[i] = l1e_empty();
-        page = pfn_to_page(pfn);
+        page = mfn_to_page(pfn);
         ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
         ASSERT_PAGE_IS_DOMAIN(page, v->domain);
         put_page_and_type(page);
@@ -266,12 +266,12 @@
 }
 
 
-static int alloc_segdesc_page(struct pfn_info *page)
+static int alloc_segdesc_page(struct page_info *page)
 {
     struct desc_struct *descs;
     int i;
 
-    descs = map_domain_page(page_to_pfn(page));
+    descs = map_domain_page(page_to_mfn(page));
 
     for ( i = 0; i < 512; i++ )
         if ( unlikely(!check_descriptor(&descs[i])) )
@@ -291,7 +291,7 @@
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
-    unsigned long gpfn, gmfn;
+    unsigned long gmfn, mfn;
     l1_pgentry_t l1e, nl1e;
     unsigned long gva = v->arch.guest_context.ldt_base + (off << PAGE_SHIFT);
     int res;
@@ -316,25 +316,25 @@
     if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
         return 0;
 
-    gpfn = l1e_get_pfn(l1e);
-    gmfn = __gpfn_to_mfn(d, gpfn);
-    if ( unlikely(!VALID_MFN(gmfn)) )
+    gmfn = l1e_get_pfn(l1e);
+    mfn = gmfn_to_mfn(d, gmfn);
+    if ( unlikely(!VALID_MFN(mfn)) )
         return 0;
 
-    res = get_page_and_type(pfn_to_page(gmfn), d, PGT_ldt_page);
+    res = get_page_and_type(mfn_to_page(mfn), d, PGT_ldt_page);
 
     if ( !res && unlikely(shadow_mode_refcounts(d)) )
     {
         shadow_lock(d);
-        shadow_remove_all_write_access(d, gpfn, gmfn);
-        res = get_page_and_type(pfn_to_page(gmfn), d, PGT_ldt_page);
+        shadow_remove_all_write_access(d, gmfn, mfn);
+        res = get_page_and_type(mfn_to_page(mfn), d, PGT_ldt_page);
         shadow_unlock(d);
     }
 
     if ( unlikely(!res) )
         return 0;
 
-    nl1e = l1e_from_pfn(gmfn, l1e_get_flags(l1e) | _PAGE_RW);
+    nl1e = l1e_from_pfn(mfn, l1e_get_flags(l1e) | _PAGE_RW);
 
     v->arch.perdomain_ptes[off + 16] = nl1e;
     v->arch.shadow_ldt_mapcnt++;
@@ -345,9 +345,9 @@
 
 static int get_page_from_pagenr(unsigned long page_nr, struct domain *d)
 {
-    struct pfn_info *page = pfn_to_page(page_nr);
-
-    if ( unlikely(!pfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
+    struct page_info *page = mfn_to_page(page_nr);
+
+    if ( unlikely(!mfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
     {
         MEM_LOG("Could not get page ref for pfn %lx", page_nr);
         return 0;
@@ -361,7 +361,7 @@
                                          unsigned long type,
                                          struct domain *d)
 {
-    struct pfn_info *page = pfn_to_page(page_nr);
+    struct page_info *page = mfn_to_page(page_nr);
 
     if ( unlikely(!get_page_from_pagenr(page_nr, d)) )
         return 0;
@@ -392,7 +392,7 @@
     root_pgentry_t re, unsigned long re_pfn, struct domain *d)
 {
     unsigned long x, y;
-    struct pfn_info *page;
+    struct page_info *page;
     unsigned long pfn;
 
     ASSERT( !shadow_mode_refcounts(d) );
@@ -413,7 +413,7 @@
          * Make sure that the mapped frame is an already-validated L2 table. 
          * If so, atomically increment the count (checking for overflow).
          */
-        page = pfn_to_page(pfn);
+        page = mfn_to_page(pfn);
         y = page->u.inuse.type_info;
         do {
             x = y;
@@ -436,7 +436,7 @@
     l1_pgentry_t l1e, struct domain *d)
 {
     unsigned long mfn = l1e_get_pfn(l1e);
-    struct pfn_info *page = pfn_to_page(mfn);
+    struct page_info *page = mfn_to_page(mfn);
     int okay;
 
     if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
@@ -448,7 +448,7 @@
         return 0;
     }
 
-    if ( unlikely(!pfn_valid(mfn)) ||
+    if ( unlikely(!mfn_valid(mfn)) ||
          unlikely(page_get_owner(page) == dom_io) )
     {
         /* DOMID_IO reverts to caller for privilege checks. */
@@ -462,7 +462,7 @@
         }
 
         /* No reference counting for out-of-range I/O pages. */
-        if ( !pfn_valid(mfn) )
+        if ( !mfn_valid(mfn) )
             return 1;
 
         d = dom_io;
@@ -586,11 +586,11 @@
 void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
 {
     unsigned long    pfn  = l1e_get_pfn(l1e);
-    struct pfn_info *page = pfn_to_page(pfn);
+    struct page_info *page = mfn_to_page(pfn);
     struct domain   *e;
     struct vcpu     *v;
 
-    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !pfn_valid(pfn) )
+    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(pfn) )
         return;
 
     e = page_get_owner(page);
@@ -644,7 +644,7 @@
 {
     if ( (l2e_get_flags(l2e) & _PAGE_PRESENT) && 
          (l2e_get_pfn(l2e) != pfn) )
-        put_page_and_type(pfn_to_page(l2e_get_pfn(l2e)));
+        put_page_and_type(mfn_to_page(l2e_get_pfn(l2e)));
 }
 
 
@@ -654,7 +654,7 @@
 {
     if ( (l3e_get_flags(l3e) & _PAGE_PRESENT) && 
          (l3e_get_pfn(l3e) != pfn) )
-        put_page_and_type(pfn_to_page(l3e_get_pfn(l3e)));
+        put_page_and_type(mfn_to_page(l3e_get_pfn(l3e)));
 }
 
 #endif
@@ -665,16 +665,16 @@
 {
     if ( (l4e_get_flags(l4e) & _PAGE_PRESENT) && 
          (l4e_get_pfn(l4e) != pfn) )
-        put_page_and_type(pfn_to_page(l4e_get_pfn(l4e)));
+        put_page_and_type(mfn_to_page(l4e_get_pfn(l4e)));
 }
 
 #endif
 
 
-static int alloc_l1_table(struct pfn_info *page)
+static int alloc_l1_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long  pfn = page_to_pfn(page);
+    unsigned long  pfn = page_to_mfn(page);
     l1_pgentry_t  *pl1e;
     int            i;
 
@@ -703,7 +703,7 @@
 #ifdef CONFIG_X86_PAE
 static int create_pae_xen_mappings(l3_pgentry_t *pl3e)
 {
-    struct pfn_info *page;
+    struct page_info *page;
     l2_pgentry_t    *pl2e;
     l3_pgentry_t     l3e3;
     int              i;
@@ -809,10 +809,10 @@
     ({ *(bp) = (unsigned long)(l2o) << L2_PAGETABLE_SHIFT; 1; })
 #endif
 
-static int alloc_l2_table(struct pfn_info *page, unsigned long type)
+static int alloc_l2_table(struct page_info *page, unsigned long type)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long  pfn = page_to_pfn(page);
+    unsigned long  pfn = page_to_mfn(page);
     unsigned long  vaddr;
     l2_pgentry_t  *pl2e;
     int            i;
@@ -863,10 +863,10 @@
 
 
 #if CONFIG_PAGING_LEVELS >= 3
-static int alloc_l3_table(struct pfn_info *page, unsigned long type)
+static int alloc_l3_table(struct page_info *page, unsigned long type)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long  pfn = page_to_pfn(page);
+    unsigned long  pfn = page_to_mfn(page);
     unsigned long  vaddr;
     l3_pgentry_t  *pl3e;
     int            i;
@@ -915,10 +915,10 @@
 #endif
 
 #if CONFIG_PAGING_LEVELS >= 4
-static int alloc_l4_table(struct pfn_info *page, unsigned long type)
+static int alloc_l4_table(struct page_info *page, unsigned long type)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long  pfn = page_to_pfn(page);
+    unsigned long  pfn = page_to_mfn(page);
     l4_pgentry_t  *pl4e = page_to_virt(page);
     unsigned long vaddr;
     int            i;
@@ -965,10 +965,10 @@
 #endif
 
 
-static void free_l1_table(struct pfn_info *page)
+static void free_l1_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long pfn = page_to_pfn(page);
+    unsigned long pfn = page_to_mfn(page);
     l1_pgentry_t *pl1e;
     int i;
 
@@ -982,9 +982,9 @@
 }
 
 
-static void free_l2_table(struct pfn_info *page)
-{
-    unsigned long pfn = page_to_pfn(page);
+static void free_l2_table(struct page_info *page)
+{
+    unsigned long pfn = page_to_mfn(page);
     l2_pgentry_t *pl2e;
     int i;
 
@@ -1000,9 +1000,9 @@
 
 #if CONFIG_PAGING_LEVELS >= 3
 
-static void free_l3_table(struct pfn_info *page)
-{
-    unsigned long pfn = page_to_pfn(page);
+static void free_l3_table(struct page_info *page)
+{
+    unsigned long pfn = page_to_mfn(page);
     l3_pgentry_t *pl3e;
     int           i;
 
@@ -1019,9 +1019,9 @@
 
 #if CONFIG_PAGING_LEVELS >= 4
 
-static void free_l4_table(struct pfn_info *page)
-{
-    unsigned long pfn = page_to_pfn(page);
+static void free_l4_table(struct page_info *page)
+{
+    unsigned long pfn = page_to_mfn(page);
     l4_pgentry_t *pl4e = page_to_virt(page);
     int           i;
 
@@ -1288,12 +1288,12 @@
 
 #endif
 
-int alloc_page_type(struct pfn_info *page, unsigned long type)
+int alloc_page_type(struct page_info *page, unsigned long type)
 {
     struct domain *owner = page_get_owner(page);
 
     if ( owner != NULL )
-        mark_dirty(owner, page_to_pfn(page));
+        mark_dirty(owner, page_to_mfn(page));
 
     switch ( type & PGT_type_mask )
     {
@@ -1319,10 +1319,10 @@
 }
 
 
-void free_page_type(struct pfn_info *page, unsigned long type)
+void free_page_type(struct page_info *page, unsigned long type)
 {
     struct domain *owner = page_get_owner(page);
-    unsigned long gpfn;
+    unsigned long gmfn;
 
     if ( likely(owner != NULL) )
     {
@@ -1337,14 +1337,14 @@
         {
             /* Raw page tables are rewritten during save/restore. */
             if ( !shadow_mode_translate(owner) )
-                mark_dirty(owner, page_to_pfn(page));
+                mark_dirty(owner, page_to_mfn(page));
 
             if ( shadow_mode_refcounts(owner) )
                 return;
 
-            gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
-            ASSERT(VALID_M2P(gpfn));
-            remove_shadow(owner, gpfn, type & PGT_type_mask);
+            gmfn = mfn_to_gmfn(owner, page_to_mfn(page));
+            ASSERT(VALID_M2P(gmfn));
+            remove_shadow(owner, gmfn, type & PGT_type_mask);
         }
     }
 
@@ -1372,13 +1372,13 @@
 
     default:
         printk("%s: type %lx pfn %lx\n",__FUNCTION__,
-               type, page_to_pfn(page));
+               type, page_to_mfn(page));
         BUG();
     }
 }
 
 
-void put_page_type(struct pfn_info *page)
+void put_page_type(struct page_info *page)
 {
     unsigned long nx, x, y = page->u.inuse.type_info;
 
@@ -1433,7 +1433,7 @@
 }
 
 
-int get_page_type(struct pfn_info *page, unsigned long type)
+int get_page_type(struct page_info *page, unsigned long type)
 {
     unsigned long nx, x, y = page->u.inuse.type_info;
 
@@ -1443,7 +1443,7 @@
         nx = x + 1;
         if ( unlikely((nx & PGT_count_mask) == 0) )
         {
-            MEM_LOG("Type count overflow on pfn %lx", page_to_pfn(page));
+            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
             return 0;
         }
         else if ( unlikely((x & PGT_count_mask) == 0) )
@@ -1506,8 +1506,8 @@
                         MEM_LOG("Bad type (saw %" PRtype_info
                                 " != exp %" PRtype_info ") "
                                 "for mfn %lx (pfn %lx)",
-                                x, type, page_to_pfn(page),
-                                get_pfn_from_mfn(page_to_pfn(page)));
+                                x, type, page_to_mfn(page),
+                                get_pfn_from_mfn(page_to_mfn(page)));
                     return 0;
                 }
                 else if ( (x & PGT_va_mask) == PGT_va_mutable )
@@ -1547,7 +1547,7 @@
         {
             MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %"
                     PRtype_info ": caf=%08x taf=%" PRtype_info,
-                    page_to_pfn(page), get_pfn_from_mfn(page_to_pfn(page)),
+                    page_to_mfn(page), get_pfn_from_mfn(page_to_mfn(page)),
                     type, page->count_info, page->u.inuse.type_info);
             /* Noone else can get a reference. We hold the only ref. */
             page->u.inuse.type_info = 0;
@@ -1585,9 +1585,9 @@
         write_ptbase(v);
 
         if ( shadow_mode_refcounts(d) )
-            put_page(pfn_to_page(old_base_mfn));
+            put_page(mfn_to_page(old_base_mfn));
         else
-            put_page_and_type(pfn_to_page(old_base_mfn));
+            put_page_and_type(mfn_to_page(old_base_mfn));
 
         /* CR3 also holds a ref to its shadow... */
         if ( shadow_mode_enabled(d) )
@@ -1596,7 +1596,7 @@
                 put_shadow_ref(v->arch.monitor_shadow_ref);
             v->arch.monitor_shadow_ref =
                 pagetable_get_pfn(v->arch.monitor_table);
-            ASSERT(!page_get_owner(pfn_to_page(v->arch.monitor_shadow_ref)));
+            ASSERT(!page_get_owner(mfn_to_page(v->arch.monitor_shadow_ref)));
             get_shadow_ref(v->arch.monitor_shadow_ref);
         }
     }
@@ -1717,7 +1717,7 @@
     struct mmuext_op op;
     int rc = 0, i = 0, okay, cpu = smp_processor_id();
     unsigned long mfn, type, done = 0;
-    struct pfn_info *page;
+    struct page_info *page;
     struct vcpu *v = current;
     struct domain *d = v->domain;
 
@@ -1763,7 +1763,7 @@
 
         okay = 1;
         mfn  = op.arg1.mfn;
-        page = pfn_to_page(mfn);
+        page = mfn_to_page(mfn);
 
         switch ( op.cmd )
         {
@@ -1827,7 +1827,7 @@
             break;
 
         case MMUEXT_NEW_BASEPTR:
-            mfn = __gpfn_to_mfn(current->domain, mfn);
+            mfn = gmfn_to_mfn(current->domain, mfn);
             okay = new_guest_cr3(mfn);
             percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
             break;
@@ -1846,7 +1846,7 @@
                     pagetable_get_pfn(v->arch.guest_table_user);
                 v->arch.guest_table_user = mk_pagetable(mfn << PAGE_SHIFT);
                 if ( old_mfn != 0 )
-                    put_page_and_type(pfn_to_page(old_mfn));
+                    put_page_and_type(mfn_to_page(old_mfn));
             }
             break;
 #endif
@@ -1965,8 +1965,8 @@
 {
     struct mmu_update req;
     void *va;
-    unsigned long gpfn, mfn;
-    struct pfn_info *page;
+    unsigned long gpfn, gmfn, mfn;
+    struct page_info *page;
     int rc = 0, okay = 1, i = 0, cpu = smp_processor_id();
     unsigned int cmd, done = 0;
     struct vcpu *v = current;
@@ -2034,8 +2034,8 @@
              */
         case MMU_NORMAL_PT_UPDATE:
 
-            gpfn = req.ptr >> PAGE_SHIFT;
-            mfn = __gpfn_to_mfn(d, gpfn);
+            gmfn = req.ptr >> PAGE_SHIFT;
+            mfn = gmfn_to_mfn(d, gmfn);
 
             if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
             {
@@ -2046,7 +2046,7 @@
             va = map_domain_page_with_cache(mfn, &mapcache);
             va = (void *)((unsigned long)va +
                           (unsigned long)(req.ptr & ~PAGE_MASK));
-            page = pfn_to_page(mfn);
+            page = mfn_to_page(mfn);
 
             switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask )
             {
@@ -2130,7 +2130,7 @@
                         if ( page_is_page_table(page) &&
                              !page_out_of_sync(page) )
                         {
-                            shadow_mark_mfn_out_of_sync(v, gpfn, mfn);
+                            shadow_mark_mfn_out_of_sync(v, gmfn, mfn);
                         }
                     }
 
@@ -2171,7 +2171,7 @@
 
             mark_dirty(FOREIGNDOM, mfn);
 
-            put_page(pfn_to_page(mfn));
+            put_page(mfn_to_page(mfn));
             break;
 
         default:
@@ -2211,8 +2211,8 @@
 {
     int rc = GNTST_okay;
     void *va;
-    unsigned long gpfn, mfn;
-    struct pfn_info *page;
+    unsigned long gmfn, mfn;
+    struct page_info *page;
     u32 type_info;
     l1_pgentry_t ol1e;
     struct domain *d = v->domain;
@@ -2220,8 +2220,8 @@
     ASSERT(spin_is_locked(&d->big_lock));
     ASSERT(!shadow_mode_refcounts(d));
 
-    gpfn = pte_addr >> PAGE_SHIFT;
-    mfn = __gpfn_to_mfn(d, gpfn);
+    gmfn = pte_addr >> PAGE_SHIFT;
+    mfn = gmfn_to_mfn(d, gmfn);
 
     if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
     {
@@ -2231,7 +2231,7 @@
     
     va = map_domain_page(mfn);
     va = (void *)((unsigned long)va + (pte_addr & ~PAGE_MASK));
-    page = pfn_to_page(mfn);
+    page = mfn_to_page(mfn);
 
     type_info = page->u.inuse.type_info;
     if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
@@ -2273,15 +2273,15 @@
 {
     int rc = GNTST_okay;
     void *va;
-    unsigned long gpfn, mfn;
-    struct pfn_info *page;
+    unsigned long gmfn, mfn;
+    struct page_info *page;
     u32 type_info;
     l1_pgentry_t ol1e;
 
     ASSERT(!shadow_mode_refcounts(d));
 
-    gpfn = addr >> PAGE_SHIFT;
-    mfn = __gpfn_to_mfn(d, gpfn);
+    gmfn = addr >> PAGE_SHIFT;
+    mfn = gmfn_to_mfn(d, gmfn);
 
     if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
     {
@@ -2291,7 +2291,7 @@
     
     va = map_domain_page(mfn);
     va = (void *)((unsigned long)va + (addr & ~PAGE_MASK));
-    page = pfn_to_page(mfn);
+    page = mfn_to_page(mfn);
 
     type_info = page->u.inuse.type_info;
     if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
@@ -2433,7 +2433,7 @@
 }
 
 int steal_page_for_grant_transfer(
-    struct domain *d, struct pfn_info *page)
+    struct domain *d, struct page_info *page)
 {
     u32 _d, _nd, x, y;
 
@@ -2453,7 +2453,7 @@
                      (1 | PGC_allocated)) || unlikely(_nd != _d)) { 
             DPRINTK("gnttab_transfer: Bad page %p: ed=%p(%u), sd=%p,"
                     " caf=%08x, taf=%" PRtype_info "\n", 
-                    (void *) page_to_pfn(page),
+                    (void *) page_to_mfn(page),
                     d, d->domain_id, unpickle_domptr(_nd), x, 
                     page->u.inuse.type_info);
             spin_unlock(&d->page_alloc_lock);
@@ -2612,7 +2612,7 @@
     for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
     {
         if ( (pfn = l1e_get_pfn(v->arch.perdomain_ptes[i])) != 0 )
-            put_page_and_type(pfn_to_page(pfn));
+            put_page_and_type(mfn_to_page(pfn));
         v->arch.perdomain_ptes[i] = l1e_empty();
         v->arch.guest_context.gdt_frames[i] = 0;
     }
@@ -2635,9 +2635,9 @@
 
     /* Check the pages in the new GDT. */
     for ( i = 0; i < nr_pages; i++ ) {
-        pfn = frames[i] = __gpfn_to_mfn(d, frames[i]);
+        pfn = frames[i] = gmfn_to_mfn(d, frames[i]);
         if ((pfn >= max_page) ||
-            !get_page_and_type(pfn_to_page(pfn), d, PGT_gdt_page) )
+            !get_page_and_type(mfn_to_page(pfn), d, PGT_gdt_page) )
             goto fail;
     }
 
@@ -2657,7 +2657,7 @@
 
  fail:
     while ( i-- > 0 )
-        put_page_and_type(pfn_to_page(frames[i]));
+        put_page_and_type(mfn_to_page(frames[i]));
     return -EINVAL;
 }
 
@@ -2689,11 +2689,11 @@
 long do_update_descriptor(u64 pa, u64 desc)
 {
     struct domain *dom = current->domain;
-    unsigned long gpfn = pa >> PAGE_SHIFT;
+    unsigned long gmfn = pa >> PAGE_SHIFT;
     unsigned long mfn;
     unsigned int  offset;
     struct desc_struct *gdt_pent, d;
-    struct pfn_info *page;
+    struct page_info *page;
     long ret = -EINVAL;
 
     offset = ((unsigned int)pa & ~PAGE_MASK) / sizeof(struct desc_struct);
@@ -2702,7 +2702,7 @@
 
     LOCK_BIGLOCK(dom);
 
-    if ( !VALID_MFN(mfn = __gpfn_to_mfn(dom, gpfn)) ||
+    if ( !VALID_MFN(mfn = gmfn_to_mfn(dom, gmfn)) ||
          (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
          (mfn >= max_page) ||
          !check_descriptor(&d) )
@@ -2711,7 +2711,7 @@
         return -EINVAL;
     }
 
-    page = pfn_to_page(mfn);
+    page = mfn_to_page(mfn);
     if ( unlikely(!get_page(page, dom)) )
     {
         UNLOCK_BIGLOCK(dom);
@@ -2742,7 +2742,7 @@
         __mark_dirty(dom, mfn);
 
         if ( page_is_page_table(page) && !page_out_of_sync(page) )
-            shadow_mark_mfn_out_of_sync(current, gpfn, mfn);
+            shadow_mark_mfn_out_of_sync(current, gmfn, mfn);
     }
 
     /* All is good so make the update. */
@@ -2798,7 +2798,7 @@
         {
             d->arch.first_reserved_pfn = pfn = d->max_pages;
             guest_physmap_add_page(
-                d, pfn + 0, virt_to_phys(d->shared_info) >> PAGE_SHIFT);
+                d, pfn + 0, virt_to_maddr(d->shared_info) >> PAGE_SHIFT);
             for ( i = 0; i < NR_GRANT_FRAMES; i++ )
                 guest_physmap_add_page(
                     d, pfn + 1 + i, gnttab_shared_mfn(d, d->grant_table, i));
@@ -2977,7 +2977,7 @@
         if ( likely(l1e_get_intpte(ol1e) == (l1e_get_intpte(nl1e)|_PAGE_RW)) )
         {
             if ( likely(l1e_get_flags(nl1e) & _PAGE_PRESENT) )
-                put_page_type(pfn_to_page(l1e_get_pfn(nl1e)));
+                put_page_type(mfn_to_page(l1e_get_pfn(nl1e)));
             continue;
         }
 
@@ -3110,13 +3110,13 @@
 
 static int ptwr_emulated_update(
     unsigned long addr,
-    physaddr_t old,
-    physaddr_t val,
+    paddr_t old,
+    paddr_t val,
     unsigned int bytes,
     unsigned int do_cmpxchg)
 {
     unsigned long pfn, l1va;
-    struct pfn_info *page;
+    struct page_info *page;
     l1_pgentry_t pte, ol1e, nl1e, *pl1e;
     struct domain *d = current->domain;
 
@@ -3129,25 +3129,25 @@
     }
 
     /* Turn a sub-word access into a full-word access. */
-    if ( bytes != sizeof(physaddr_t) )
+    if ( bytes != sizeof(paddr_t) )
     {
         int           rc;
-        physaddr_t    full;
-        unsigned int  offset = addr & (sizeof(physaddr_t)-1);
+        paddr_t    full;
+        unsigned int  offset = addr & (sizeof(paddr_t)-1);
 
         /* Align address; read full word. */
-        addr &= ~(sizeof(physaddr_t)-1);
+        addr &= ~(sizeof(paddr_t)-1);
         if ( (rc = x86_emulate_read_std(addr, (unsigned long *)&full,
-                                        sizeof(physaddr_t))) )
+                                        sizeof(paddr_t))) )
             return rc; 
         /* Mask out bits provided by caller. */
-        full &= ~((((physaddr_t)1 << (bytes*8)) - 1) << (offset*8));
+        full &= ~((((paddr_t)1 << (bytes*8)) - 1) << (offset*8));
         /* Shift the caller value and OR in the missing bits. */
-        val  &= (((physaddr_t)1 << (bytes*8)) - 1);
+        val  &= (((paddr_t)1 << (bytes*8)) - 1);
         val <<= (offset)*8;
         val  |= full;
         /* Also fill in missing parts of the cmpxchg old value. */
-        old  &= (((physaddr_t)1 << (bytes*8)) - 1);
+        old  &= (((paddr_t)1 << (bytes*8)) - 1);
         old <<= (offset)*8;
         old  |= full;
     }
@@ -3172,7 +3172,7 @@
     }
 
     pfn  = l1e_get_pfn(pte);
-    page = pfn_to_page(pfn);
+    page = mfn_to_page(pfn);
 
     /* We are looking only for read-only mappings of p.t. pages. */
     if ( ((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) != _PAGE_PRESENT) ||
@@ -3194,7 +3194,7 @@
     }
 
     /* Checked successfully: do the update (write or cmpxchg). */
-    pl1e = map_domain_page(page_to_pfn(page));
+    pl1e = map_domain_page(page_to_mfn(page));
     pl1e = (l1_pgentry_t *)((unsigned long)pl1e + (addr & ~PAGE_MASK));
     if ( do_cmpxchg )
     {
@@ -3261,7 +3261,7 @@
                        struct cpu_user_regs *regs)
 {
     unsigned long    pfn;
-    struct pfn_info *page;
+    struct page_info *page;
     l1_pgentry_t    *pl1e, pte;
     l2_pgentry_t    *pl2e, l2e;
     int              which, flags;
@@ -3283,7 +3283,7 @@
     }
 
     pfn  = l1e_get_pfn(pte);
-    page = pfn_to_page(pfn);
+    page = mfn_to_page(pfn);
 
 #ifdef CONFIG_X86_64
 #define WRPT_PTE_FLAGS (_PAGE_RW | _PAGE_PRESENT | _PAGE_USER)
@@ -3473,8 +3473,8 @@
 
 int map_pages_to_xen(
     unsigned long virt,
-    unsigned long pfn,
-    unsigned long nr_pfns,
+    unsigned long mfn,
+    unsigned long nr_mfns,
     unsigned long flags)
 {
     l2_pgentry_t *pl2e, ol2e;
@@ -3484,17 +3484,17 @@
     unsigned int  map_small_pages = !!(flags & MAP_SMALL_PAGES);
     flags &= ~MAP_SMALL_PAGES;
 
-    while ( nr_pfns != 0 )
+    while ( nr_mfns != 0 )
     {
         pl2e = virt_to_xen_l2e(virt);
 
-        if ( ((((virt>>PAGE_SHIFT) | pfn) & ((1<<PAGETABLE_ORDER)-1)) == 0) &&
-             (nr_pfns >= (1<<PAGETABLE_ORDER)) &&
+        if ( ((((virt>>PAGE_SHIFT) | mfn) & ((1<<PAGETABLE_ORDER)-1)) == 0) &&
+             (nr_mfns >= (1<<PAGETABLE_ORDER)) &&
              !map_small_pages )
         {
             /* Super-page mapping. */
             ol2e  = *pl2e;
-            *pl2e = l2e_from_pfn(pfn, flags|_PAGE_PSE);
+            *pl2e = l2e_from_pfn(mfn, flags|_PAGE_PSE);
 
             if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
             {
@@ -3504,8 +3504,8 @@
             }
 
             virt    += 1UL << L2_PAGETABLE_SHIFT;
-            pfn     += 1UL << PAGETABLE_ORDER;
-            nr_pfns -= 1UL << PAGETABLE_ORDER;
+            mfn     += 1UL << PAGETABLE_ORDER;
+            nr_mfns -= 1UL << PAGETABLE_ORDER;
         }
         else
         {
@@ -3529,13 +3529,13 @@
 
             pl1e  = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
             ol1e  = *pl1e;
-            *pl1e = l1e_from_pfn(pfn, flags);
+            *pl1e = l1e_from_pfn(mfn, flags);
             if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
                 local_flush_tlb_one(virt);
 
             virt    += 1UL << L1_PAGETABLE_SHIFT;
-            pfn     += 1UL;
-            nr_pfns -= 1UL;
+            mfn     += 1UL;
+            nr_mfns -= 1UL;
         }
     }
 
@@ -3575,7 +3575,7 @@
         flags &= ~_PAGE_PRESENT;
 
     map_pages_to_xen(
-        _p, virt_to_phys(p) >> PAGE_SHIFT, _l >> PAGE_SHIFT, flags);
+        _p, virt_to_maddr(p) >> PAGE_SHIFT, _l >> PAGE_SHIFT, flags);
 }
 
 void memguard_guard_range(void *p, unsigned long l)
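
The ptwr_emulated_update() hunk above is the one place in this file where the
physaddr_t -> paddr_t rename reaches into arithmetic rather than identifiers:
a guest write narrower than sizeof(paddr_t) is widened into a full-word access
by masking and merging byte lanes. A minimal standalone sketch of that
mask-and-merge step -- hypothetical function name, uint64_t standing in for
paddr_t on a 64-bit build:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t paddr_t;   /* stand-in for Xen's paddr_t */

    /* Merge a write of 'bytes' bytes (bytes < sizeof(paddr_t)) at byte
     * 'offset' into the current full-word contents 'full'. */
    static paddr_t widen_subword(paddr_t full, paddr_t val,
                                 unsigned int bytes, unsigned int offset)
    {
        /* Clear the byte lanes being written... */
        full &= ~((((paddr_t)1 << (bytes * 8)) - 1) << (offset * 8));
        /* ...mask the caller's value to those lanes and merge it in. */
        val  &= ((paddr_t)1 << (bytes * 8)) - 1;
        return full | (val << (offset * 8));
    }

    int main(void)
    {
        /* A 2-byte write of 0xbeef at byte offset 2. */
        assert(widen_subword(0x1111222233334444ULL, 0xbeef, 2, 2) ==
               0x11112222beef4444ULL);
        return 0;
    }

The hunk applies the same shift-and-OR to the cmpxchg 'old' value, so both the
compare and the write operate on aligned full words.
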
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/mpparse.c
--- a/xen/arch/x86/mpparse.c    Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/mpparse.c    Wed Feb  1 15:28:50 2006
@@ -737,7 +737,7 @@
 
 static int __init smp_scan_config (unsigned long base, unsigned long length)
 {
-       unsigned int *bp = phys_to_virt(base);
+       unsigned int *bp = maddr_to_virt(base);
        struct intel_mp_floating *mpf;
 
        Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
@@ -754,9 +754,9 @@
 
                        smp_found_config = 1;
                        printk(KERN_INFO "found SMP MP-table at %08lx\n",
-                                               virt_to_phys(mpf));
+                                               virt_to_maddr(mpf));
 #if 0
-                       reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
+                       reserve_bootmem(virt_to_maddr(mpf), PAGE_SIZE);
                        if (mpf->mpf_physptr) {
                                /*
                                 * We cannot access to MPC table to compute
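
The mpparse.c changes are purely the phys -> maddr spelling switch; under the
new scheme virt_to_maddr()/maddr_to_virt() keep the semantics of the old
virt_to_phys()/phys_to_virt() pair. A hedged sketch of the round-trip
invariant this implies for direct-mapped pointers (the helper and its ASSERTs
are illustrative, not part of the patch):

    /* Illustrative only: assumes 'p' points into Xen's direct-mapped
     * region, where the translation is a fixed offset. */
    static void check_maddr_roundtrip(void *p)
    {
        paddr_t ma = virt_to_maddr(p);          /* virtual -> machine address */
        unsigned long mfn = ma >> PAGE_SHIFT;   /* machine frame number */

        ASSERT(maddr_to_virt(ma) == p);
        ASSERT(page_to_maddr(mfn_to_page(mfn)) == (ma & PAGE_MASK));
    }
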
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/setup.c      Wed Feb  1 15:28:50 2006
@@ -29,7 +29,7 @@
 
 /*
  * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
- * pfn_info table and allocation bitmap.
+ * page_info table and allocation bitmap.
  */
 static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
 #if defined(CONFIG_X86_64)
@@ -153,7 +153,7 @@
     module_t *mod = (module_t *)__va(mbi->mods_addr);
     unsigned long nr_pages, modules_length;
     unsigned long initial_images_start, initial_images_end;
-    physaddr_t s, e;
+    paddr_t s, e;
     int i, e820_warn = 0, e820_raw_nr = 0, bytes = 0;
     struct ns16550_defaults ns16550 = {
         .data_bits = 8,
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/shadow.c     Wed Feb  1 15:28:50 2006
@@ -77,7 +77,7 @@
 shadow_promote(struct domain *d, unsigned long gpfn, unsigned long gmfn,
                unsigned long new_type)
 {
-    struct pfn_info *page = pfn_to_page(gmfn);
+    struct page_info *page = mfn_to_page(gmfn);
     int pinned = 0, okay = 1;
 
     if ( page_out_of_sync(page) )
@@ -177,7 +177,7 @@
  *   general ref to the page.
  */
 /*
- * pfn_info fields for pages allocated as shadow pages:
+ * page_info fields for pages allocated as shadow pages:
  *
  * All 32 bits of count_info are a simple count of refs to this shadow
  * from a) other shadow pages, b) current CR3's (aka ed->arch.shadow_table),
@@ -204,7 +204,7 @@
                   unsigned long gpfn, unsigned long gmfn,
                   u32 psh_type)
 {
-    struct pfn_info *page;
+    struct page_info *page;
     unsigned long smfn;
     int pin = 0;
     void *l1, *lp;
@@ -217,7 +217,7 @@
         if ( !list_empty(&d->arch.free_shadow_frames) )
         {
             struct list_head *entry = d->arch.free_shadow_frames.next;
-            page = list_entry(entry, struct pfn_info, list);
+            page = list_entry(entry, struct page_info, list);
             list_del(entry);
             perfc_decr(free_l1_pages);
         }
@@ -233,11 +233,11 @@
                 if (!page)
                     goto no_shadow_page;
 
-                l1 = map_domain_page(page_to_pfn(page));
+                l1 = map_domain_page(page_to_mfn(page));
                 memset(l1, 0, PAGE_SIZE);
                 unmap_domain_page(l1);
 
-                l1 = map_domain_page(page_to_pfn(page+1));
+                l1 = map_domain_page(page_to_mfn(page+1));
                 memset(l1, 0, PAGE_SIZE);
                 unmap_domain_page(l1);
 #else
@@ -245,7 +245,7 @@
                 if (!page)
                     goto no_shadow_page;
 
-                l1 = map_domain_page(page_to_pfn(page));
+                l1 = map_domain_page(page_to_mfn(page));
                 memset(l1, 0, PAGE_SIZE);
                 unmap_domain_page(l1);
 #endif
@@ -256,7 +256,7 @@
                 if (!page)
                     goto no_shadow_page;
 
-                l1 = map_domain_page(page_to_pfn(page));
+                l1 = map_domain_page(page_to_mfn(page));
                 memset(l1, 0, PAGE_SIZE);
                 unmap_domain_page(l1);
             }
@@ -280,12 +280,12 @@
         if (!page)
             goto no_shadow_page;
 
-        lp = map_domain_page(page_to_pfn(page));
+        lp = map_domain_page(page_to_mfn(page));
         memset(lp, 0, PAGE_SIZE);
         unmap_domain_page(lp);
     }
 
-    smfn = page_to_pfn(page);
+    smfn = page_to_mfn(page);
 
     ASSERT( (gmfn & ~PGT_mfn_mask) == 0 );
     page->u.inuse.type_info = psh_type | gmfn;
@@ -506,7 +506,7 @@
 
         for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
             spl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
-                l2e_from_page(virt_to_page(page_get_owner(pfn_to_page(gmfn))->
+                l2e_from_page(virt_to_page(page_get_owner(mfn_to_page(gmfn))->
                                            arch.mm_perdomain_pt) + i,
                               __PAGE_HYPERVISOR);
 
@@ -566,7 +566,7 @@
         /* This L1 is NOT already shadowed so we need to shadow it. */
         SH_VVLOG("4a: l1 not shadowed");
 
-        gl1mfn = __gpfn_to_mfn(d, gl1pfn);
+        gl1mfn = gmfn_to_mfn(d, gl1pfn);
         if ( unlikely(!VALID_MFN(gl1mfn)) )
         {
             // Attempt to use an invalid pfn as an L1 page.
@@ -636,7 +636,7 @@
         guest_l2_pgentry_t tmp_gl2e = {0};
 
         __guest_get_l2e(v, va, &tmp_gl2e);
-        tmp_gmfn = __gpfn_to_mfn(d, l2e_get_pfn(tmp_gl2e));
+        tmp_gmfn = gmfn_to_mfn(d, l2e_get_pfn(tmp_gl2e));
         gpl1e = (guest_l1_pgentry_t *) map_domain_page(tmp_gmfn);
 
         /* If the PGT_l1_shadow has two continual pages */
@@ -673,7 +673,7 @@
             set_guest_back_ptr(d, sl1e, sl1mfn, i);
         }
 
-        pfn_to_page(sl1mfn)->tlbflush_timestamp =
+        mfn_to_page(sl1mfn)->tlbflush_timestamp =
             SHADOW_ENCODE_MIN_MAX(min, max);
 
         unmap_domain_page(gpl1e);
@@ -910,7 +910,7 @@
     u32 min_max = 0;
     int min, max, length;
 
-    if ( test_and_set_bit(_PGC_out_of_sync, &pfn_to_page(gmfn)->count_info) )
+    if ( test_and_set_bit(_PGC_out_of_sync, &mfn_to_page(gmfn)->count_info) )
     {
         ASSERT(__shadow_status(d, gpfn, PGT_snapshot));
         return SHADOW_SNAPSHOT_ELSEWHERE;
@@ -931,8 +931,8 @@
 
     if ( shadow_mode_refcounts(d) &&
          (shadow_max_pgtable_type(d, gpfn, &sl1mfn) == PGT_l1_shadow) )
-        min_max = pfn_to_page(sl1mfn)->tlbflush_timestamp;
-    pfn_to_page(smfn)->tlbflush_timestamp = min_max;
+        min_max = mfn_to_page(sl1mfn)->tlbflush_timestamp;
+    mfn_to_page(smfn)->tlbflush_timestamp = min_max;
 
     min = SHADOW_MIN(min_max);
     max = SHADOW_MAX(min_max);
@@ -956,11 +956,11 @@
                              unsigned long mfn)
 {
     struct domain *d = v->domain;
-    struct pfn_info *page = pfn_to_page(mfn);
+    struct page_info *page = mfn_to_page(mfn);
     struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
 
     ASSERT(shadow_lock_is_acquired(d));
-    ASSERT(pfn_valid(mfn));
+    ASSERT(mfn_valid(mfn));
 
 #ifndef NDEBUG
     {
@@ -1143,7 +1143,7 @@
 #else
     unsigned long l2mfn = pagetable_get_pfn(v->arch.guest_table);
 #endif
-    unsigned long l2pfn = __mfn_to_gpfn(d, l2mfn);
+    unsigned long l2pfn = mfn_to_gmfn(d, l2mfn);
     guest_l2_pgentry_t l2e;
     unsigned long l1pfn, l1mfn;
     guest_l1_pgentry_t *guest_pt;
@@ -1177,7 +1177,7 @@
                 && i == PAGING_L4)
                 continue;       /* skip the top-level for 3-level */
 
-            if ( page_out_of_sync(pfn_to_page(gmfn)) &&
+            if ( page_out_of_sync(mfn_to_page(gmfn)) &&
                  !snapshot_entry_matches(
                      d, guest_pt, gpfn, table_offset_64(va, i)) )
             {
@@ -1192,7 +1192,7 @@
                 unmap_and_return (0);
             }
             gpfn = entry_get_pfn(le);
-            gmfn = __gpfn_to_mfn(d, gpfn);
+            gmfn = gmfn_to_mfn(d, gpfn);
             if ( !VALID_MFN(gmfn) )
             {
                 unmap_and_return (0);
@@ -1203,7 +1203,7 @@
         }
 
         /* L2 */
-        if ( page_out_of_sync(pfn_to_page(gmfn)) &&
+        if ( page_out_of_sync(mfn_to_page(gmfn)) &&
              !snapshot_entry_matches(d, guest_pt, gpfn, l2_table_offset(va)) )
         {
             unmap_and_return (1);
@@ -1217,7 +1217,7 @@
 #undef unmap_and_return
 #endif /* CONFIG_PAGING_LEVELS >= 3 */
     {
-        if ( page_out_of_sync(pfn_to_page(l2mfn)) &&
+        if ( page_out_of_sync(mfn_to_page(l2mfn)) &&
              !snapshot_entry_matches(d, (guest_l1_pgentry_t *)v->arch.guest_vtable,
                                      l2pfn, guest_l2_table_offset(va)) )
             return 1;
@@ -1229,7 +1229,7 @@
         return 0;
 
     l1pfn = l2e_get_pfn(l2e);
-    l1mfn = __gpfn_to_mfn(d, l1pfn);
+    l1mfn = gmfn_to_mfn(d, l1pfn);
 
     // If the l1 pfn is invalid, it can't be out of sync...
     if ( !VALID_MFN(l1mfn) )
@@ -1237,7 +1237,7 @@
 
     guest_pt = (guest_l1_pgentry_t *) map_domain_page(l1mfn);
 
-    if ( page_out_of_sync(pfn_to_page(l1mfn)) &&
+    if ( page_out_of_sync(mfn_to_page(l1mfn)) &&
          !snapshot_entry_matches(
              d, guest_pt, l1pfn, guest_l1_table_offset(va)) ) 
     {
@@ -1327,18 +1327,18 @@
     int i;
     u32 found = 0;
     int is_l1_shadow =
-        ((pfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
+        ((mfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
          PGT_l1_shadow);
 #if CONFIG_PAGING_LEVELS == 4
     is_l1_shadow |=
-      ((pfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
+      ((mfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
                 PGT_fl1_shadow);
 #endif
 
     match = l1e_from_pfn(readonly_gmfn, flags);
 
     if ( shadow_mode_external(d) ) {
-        i = (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask)
+        i = (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask)
             >> PGT_va_shift;
 
         if ( (i >= 0 && i < L1_PAGETABLE_ENTRIES) &&
@@ -1376,7 +1376,7 @@
 
     // If it's not a writable page, then no writable refs can be outstanding.
     //
-    if ( (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
+    if ( (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
          PGT_writable_page )
     {
         perfc_incrc(remove_write_not_writable);
@@ -1386,7 +1386,7 @@
     // How many outstanding writable PTEs for this page are there?
     //
     write_refs =
-        (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
+        (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
     if ( write_refs && MFN_PINNED(readonly_gmfn) )
     {
         write_refs--;
@@ -1404,7 +1404,7 @@
 
          // Use the back pointer to locate the shadow page that can contain
          // the PTE of interest
-         if ( (predicted_smfn = pfn_to_page(readonly_gmfn)->tlbflush_timestamp) ) {
+         if ( (predicted_smfn = mfn_to_page(readonly_gmfn)->tlbflush_timestamp) ) {
              found += remove_all_write_access_in_ptpage(
                  d, predicted_smfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, 0);
              if ( found == write_refs )
@@ -1478,7 +1478,7 @@
             // the new contents of the guest page iff this it has the right
             // page type.
             //
-            if ( stype != ( pfn_to_page(entry->gmfn)->u.inuse.type_info & PGT_type_mask) )
+            if ( stype != ( mfn_to_page(entry->gmfn)->u.inuse.type_info & PGT_type_mask) )
                 continue;
         }
 
@@ -1498,11 +1498,11 @@
 
         unshadow = 0;
 
-        min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
+        min_max_shadow = mfn_to_page(smfn)->tlbflush_timestamp;
         min_shadow     = SHADOW_MIN(min_max_shadow);
         max_shadow     = SHADOW_MAX(min_max_shadow);
 
-        min_max_snapshot= pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
+        min_max_snapshot= mfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
         min_snapshot    = SHADOW_MIN(min_max_snapshot);
         max_snapshot    = SHADOW_MAX(min_max_snapshot);
 
@@ -1673,7 +1673,7 @@
                     if ( !(entry_get_flags(guest_pt[i]) & _PAGE_PRESENT) &&
                          unlikely(entry_get_value(guest_pt[i]) != 0) &&
                          !unshadow &&
-                         (pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
+                         (mfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
                         unshadow = 1;
                 }
 #endif
@@ -1721,7 +1721,7 @@
                 if ( !(guest_root_get_flags(new_root_e) & _PAGE_PRESENT) &&
                      unlikely(guest_root_get_intpte(new_root_e) != 0) &&
                      !unshadow &&
-                     (pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
+                     (mfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
                     unshadow = 1;
             }
             if ( max == -1 )
@@ -1848,7 +1848,7 @@
     guest_l1_pgentry_t gpte = *gpte_p;
     l1_pgentry_t spte;
     unsigned long gpfn = l1e_get_pfn(gpte);
-    unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
+    unsigned long gmfn = gmfn_to_mfn(d, gpfn);
 
     //printk("l1pte_write_fault gmfn=%lx\n", gmfn);
 
@@ -1883,7 +1883,7 @@
     guest_l1_pgentry_t gpte = *gpte_p;
     l1_pgentry_t spte = *spte_p;
     unsigned long pfn = l1e_get_pfn(gpte);
-    unsigned long mfn = __gpfn_to_mfn(d, pfn);
+    unsigned long mfn = gmfn_to_mfn(d, pfn);
 
     if ( unlikely(!VALID_MFN(mfn)) )
     {
@@ -2028,7 +2028,7 @@
             domain_crash_synchronous();
         }
 
-        __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
+        __mark_dirty(d, gmfn_to_mfn(d, l2e_get_pfn(gpde)));
     }
 
     shadow_set_l1e(va, spte, 1);
@@ -2057,7 +2057,7 @@
     if ( unlikely(!(guest_l2e_get_flags(gl2e) & _PAGE_PRESENT)) )
         return INVALID_MFN;
 
-    return __gpfn_to_mfn(d, l2e_get_pfn(gl2e));
+    return gmfn_to_mfn(d, l2e_get_pfn(gl2e));
 }
 
 static int do_update_va_mapping(unsigned long va,
@@ -2132,7 +2132,7 @@
     unsigned long gmfn = pagetable_get_pfn(v->arch.guest_table);
 #endif
 
-    unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
+    unsigned long gpfn = mfn_to_gmfn(d, gmfn);
     unsigned long smfn, old_smfn;
 
 #if CONFIG_PAGING_LEVELS == 2
@@ -2400,7 +2400,7 @@
         FAIL("global bit set in shadow");
 
     eff_guest_pfn = l1e_get_pfn(eff_guest_pte);
-    eff_guest_mfn = __gpfn_to_mfn(d, eff_guest_pfn);
+    eff_guest_mfn = gmfn_to_mfn(d, eff_guest_pfn);
     shadow_mfn = l1e_get_pfn(shadow_pte);
 
     if ( !VALID_MFN(eff_guest_mfn) && !shadow_mode_refcounts(d) )
@@ -2417,7 +2417,7 @@
     {
         printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08lx 
page_table_page=%d\n",
                eff_guest_pfn, eff_guest_mfn, shadow_mfn,
-               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
+               mfn_to_page(eff_guest_mfn)->u.inuse.type_info,
                page_table_page);
         FAIL("RW coherence");
     }
@@ -2428,7 +2428,7 @@
     {
         printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08lx 
page_table_page=%d\n",
                eff_guest_pfn, eff_guest_mfn, shadow_mfn,
-               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
+               mfn_to_page(eff_guest_mfn)->u.inuse.type_info,
                page_table_page);
         FAIL("RW2 coherence");
     }
@@ -2468,7 +2468,7 @@
     l1_pgentry_t *p_guest, *p_shadow, *p_snapshot = NULL;
     int errors = 0;
 
-    if ( page_out_of_sync(pfn_to_page(gmfn)) )
+    if ( page_out_of_sync(mfn_to_page(gmfn)) )
     {
         snapshot_mfn = __shadow_status(d, gpfn, PGT_snapshot);
         ASSERT(snapshot_mfn);
@@ -2508,13 +2508,13 @@
     int errors = 0;
     int limit;
 
-    if ( !oos_pdes && (page_get_owner(pfn_to_page(gmfn)) != d) )
+    if ( !oos_pdes && (page_get_owner(mfn_to_page(gmfn)) != d) )
         FAILPT("domain doesn't own page");
-    if ( oos_pdes && (page_get_owner(pfn_to_page(gmfn)) != NULL) )
+    if ( oos_pdes && (page_get_owner(mfn_to_page(gmfn)) != NULL) )
         FAILPT("bogus owner for snapshot page");
-    if ( page_get_owner(pfn_to_page(smfn)) != NULL )
+    if ( page_get_owner(mfn_to_page(smfn)) != NULL )
         FAILPT("shadow page mfn=0x%lx is owned by someone, domid=%d",
-               smfn, page_get_owner(pfn_to_page(smfn))->domain_id);
+               smfn, page_get_owner(mfn_to_page(smfn))->domain_id);
 
 #if 0
     if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
@@ -2611,14 +2611,14 @@
     perfc_incrc(check_pagetable);
 
     ptbase_mfn = gptbase >> PAGE_SHIFT;
-    ptbase_pfn = __mfn_to_gpfn(d, ptbase_mfn);
+    ptbase_pfn = mfn_to_gmfn(d, ptbase_mfn);
 
     if ( !(smfn = __shadow_status(d, ptbase_pfn, PGT_base_page_table)) )
     {
         printk("%s-PT %lx not shadowed\n", s, gptbase);
         goto out;
     }
-    if ( page_out_of_sync(pfn_to_page(ptbase_mfn)) )
+    if ( page_out_of_sync(mfn_to_page(ptbase_mfn)) )
     {
         ptbase_mfn = __shadow_status(d, ptbase_pfn, PGT_snapshot);
         oos_pdes = 1;
@@ -2643,7 +2643,7 @@
     for ( i = 0; i < limit; i++ )
     {
         unsigned long gl1pfn = l2e_get_pfn(gpl2e[i]);
-        unsigned long gl1mfn = __gpfn_to_mfn(d, gl1pfn);
+        unsigned long gl1mfn = gmfn_to_mfn(d, gl1pfn);
         unsigned long sl1mfn = l2e_get_pfn(spl2e[i]);
 
         if ( l2e_get_intpte(spl2e[i]) != 0 )  /* FIXME: check flags? */
@@ -2689,7 +2689,7 @@
         a = &d->arch.shadow_ht[i];
         while ( a && a->gpfn_and_flags )
         {
-            gmfn = __gpfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
+            gmfn = gmfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
 
             switch ( a->gpfn_and_flags & PGT_type_mask )
             {
@@ -2699,7 +2699,7 @@
                 break;
             case PGT_l2_shadow:
                 errors += check_l2_table(v, gmfn, a->smfn,
-                                         page_out_of_sync(pfn_to_page(gmfn)));
+                                         page_out_of_sync(mfn_to_page(gmfn)));
                 break;
             case PGT_l3_shadow:
             case PGT_l4_shadow:
@@ -2797,7 +2797,7 @@
          * When we free L2 pages, we need to tell if the page contains
          * Xen private mappings. Use the va_mask part.
          */
-        pfn_to_page(s2mfn)->u.inuse.type_info |= 
+        mfn_to_page(s2mfn)->u.inuse.type_info |= 
             (unsigned long) 3 << PGT_score_shift; 
 
         memset(spl2e, 0, 
@@ -2810,7 +2810,7 @@
         for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
             spl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
                 l2e_from_page(
-                    virt_to_page(page_get_owner(pfn_to_page(gmfn))->arch.mm_perdomain_pt) + i,
+                    virt_to_page(page_get_owner(mfn_to_page(gmfn))->arch.mm_perdomain_pt) + i,
                     __PAGE_HYPERVISOR);
         for ( i = 0; i < (LINEARPT_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
             spl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
@@ -2845,7 +2845,7 @@
 {
     unsigned int count;
     unsigned long sl2mfn;
-    struct pfn_info *page;
+    struct page_info *page;
     void *l2;
 
     memset(spl4e, 0, PAGE_SIZE);
@@ -2860,7 +2860,7 @@
 
     for (count = 0; count < PDP_ENTRIES; count++)
     {
-        sl2mfn = page_to_pfn(page+count);
+        sl2mfn = page_to_mfn(page+count);
         l2 = map_domain_page(sl2mfn);
         memset(l2, 0, PAGE_SIZE);
         unmap_domain_page(l2);
@@ -2912,7 +2912,7 @@
            ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
 
         spl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
-            l4e_from_paddr(__pa(page_get_owner(pfn_to_page(gmfn))->arch.mm_perdomain_l3),
+            l4e_from_paddr(__pa(page_get_owner(mfn_to_page(gmfn))->arch.mm_perdomain_l3),
                             __PAGE_HYPERVISOR);
 
         if ( shadow_mode_translate(d) ) // NB: not external
@@ -3000,7 +3000,7 @@
         /* This is NOT already shadowed so we need to shadow it. */
         SH_VVLOG("<get_shadow_mfn>: not shadowed");
 
-        gmfn = __gpfn_to_mfn(d, gpfn);
+        gmfn = gmfn_to_mfn(d, gpfn);
         if ( unlikely(!VALID_MFN(gmfn)) )
         {
             // Attempt to use an invalid pfn as an shadow page.
@@ -3168,7 +3168,7 @@
         sl2e = l2e_from_pfn(l1_mfn, l2e_get_flags(tmp_l2e));
     } else {
         /* Allocate a new page as shadow page table if need */
-        gmfn = __gpfn_to_mfn(d, start_gpfn);
+        gmfn = gmfn_to_mfn(d, start_gpfn);
         l1_mfn = alloc_shadow_page(d, start_gpfn | nx, gmfn, PGT_fl1_shadow);
         if (unlikely(!l1_mfn)) {
             BUG();
@@ -3193,7 +3193,7 @@
     for (gpfn = start_gpfn;
       gpfn < (start_gpfn + L1_PAGETABLE_ENTRIES); gpfn++) {
 
-        mfn = __gpfn_to_mfn(d, gpfn);
+        mfn = gmfn_to_mfn(d, gpfn);
 
         if ( unlikely(!VALID_MFN(mfn)) )
         {
@@ -3343,7 +3343,7 @@
         /*
          * If it's not external mode, then mfn should be machine physical.
          */
-        mfn = __gpfn_to_mfn(d, gpfn);
+        mfn = gmfn_to_mfn(d, gpfn);
 
         lva = (pgentry_64_t *) map_domain_page(mfn);
         gle = lva[table_offset_64(va, i)];
@@ -3492,7 +3492,7 @@
         if (unlikely(!__guest_set_l1e(v, va, &gl1e))) 
             domain_crash_synchronous();
 
-        __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gl2e)));
+        __mark_dirty(d, gmfn_to_mfn(d, l2e_get_pfn(gl2e)));
     }
 
     shadow_set_l1e_64(va, (pgentry_64_t *)&sl1e, 1);
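
Across shadow.c the rename settles into one recurring lookup idiom: translate
a guest-visible frame number with gmfn_to_mfn(), check the result, and only
then dereference it as a struct page_info. A minimal sketch of that pattern,
with a hypothetical helper name (the call sites above inline it):

    /* Hypothetical helper, not in the patch: the shared lookup idiom. */
    static struct page_info *example_gmfn_lookup(struct domain *d,
                                                 unsigned long gmfn)
    {
        unsigned long mfn = gmfn_to_mfn(d, gmfn); /* guest frame -> machine frame */

        if ( !VALID_MFN(mfn) )                    /* translation can fail */
            return NULL;

        return mfn_to_page(mfn);                  /* machine frame -> page_info */
    }
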
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/shadow32.c   Wed Feb  1 15:28:50 2006
@@ -30,7 +30,7 @@
 #include <xen/sched.h>
 #include <xen/trace.h>
 
-#define MFN_PINNED(_x) (pfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
+#define MFN_PINNED(_x) (mfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
 #define va_to_l1mfn(_ed, _va) \
     (l2e_get_pfn(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]))
 
@@ -59,7 +59,7 @@
 shadow_promote(struct domain *d, unsigned long gpfn, unsigned long gmfn,
                unsigned long new_type)
 {
-    struct pfn_info *page = pfn_to_page(gmfn);
+    struct page_info *page = mfn_to_page(gmfn);
     int pinned = 0, okay = 1;
 
     if ( page_out_of_sync(page) )
@@ -144,13 +144,13 @@
     if ( !shadow_mode_refcounts(d) )
         return;
 
-    ASSERT(pfn_to_page(gmfn)->count_info & PGC_page_table);
+    ASSERT(mfn_to_page(gmfn)->count_info & PGC_page_table);
 
     if ( shadow_max_pgtable_type(d, gpfn, NULL) == PGT_none )
     {
-        clear_bit(_PGC_page_table, &pfn_to_page(gmfn)->count_info);
-
-        if ( page_out_of_sync(pfn_to_page(gmfn)) )
+        clear_bit(_PGC_page_table, &mfn_to_page(gmfn)->count_info);
+
+        if ( page_out_of_sync(mfn_to_page(gmfn)) )
         {
             remove_out_of_sync_entries(d, gmfn);
         }
@@ -178,7 +178,7 @@
  *   general ref to the page.
  */
 /*
- * pfn_info fields for pages allocated as shadow pages:
+ * page_info fields for pages allocated as shadow pages:
  *
  * All 32 bits of count_info are a simple count of refs to this shadow
  * from a) other shadow pages, b) current CR3's (aka ed->arch.shadow_table),
@@ -205,7 +205,7 @@
                   unsigned long gpfn, unsigned long gmfn,
                   u32 psh_type)
 {
-    struct pfn_info *page;
+    struct page_info *page;
     unsigned long smfn;
     int pin = 0;
     void *l1;
@@ -218,14 +218,14 @@
         if ( !list_empty(&d->arch.free_shadow_frames) )
         {
             struct list_head *entry = d->arch.free_shadow_frames.next;
-            page = list_entry(entry, struct pfn_info, list);
+            page = list_entry(entry, struct page_info, list);
             list_del(entry);
             perfc_decr(free_l1_pages);
         }
         else
         {
             page = alloc_domheap_page(NULL);
-            l1 = map_domain_page(page_to_pfn(page));
+            l1 = map_domain_page(page_to_mfn(page));
             memset(l1, 0, PAGE_SIZE);
             unmap_domain_page(l1);
         }
@@ -245,7 +245,7 @@
         BUG(); /* XXX FIXME: try a shadow flush to free up some memory. */
     }
 
-    smfn = page_to_pfn(page);
+    smfn = page_to_mfn(page);
 
     ASSERT( (gmfn & ~PGT_mfn_mask) == 0 );
     page->u.inuse.type_info = psh_type | gmfn;
@@ -320,7 +320,7 @@
 {
     l1_pgentry_t *pl1e = map_domain_page(smfn);
     int i;
-    struct pfn_info *spage = pfn_to_page(smfn);
+    struct page_info *spage = mfn_to_page(smfn);
     u32 min_max = spage->tlbflush_timestamp;
     int min = SHADOW_MIN(min_max);
     int max = SHADOW_MAX(min_max);
@@ -350,7 +350,7 @@
     for ( i = 0; i < limit; i++ )
     {
         if ( l1e_get_flags(hl2[i]) & _PAGE_PRESENT )
-            put_page(pfn_to_page(l1e_get_pfn(hl2[i])));
+            put_page(mfn_to_page(l1e_get_pfn(hl2[i])));
     }
 
     unmap_domain_page(hl2);
@@ -380,10 +380,10 @@
 
 void free_shadow_page(unsigned long smfn)
 {
-    struct pfn_info *page = pfn_to_page(smfn);
+    struct page_info *page = mfn_to_page(smfn);
     unsigned long gmfn = page->u.inuse.type_info & PGT_mfn_mask;
-    struct domain *d = page_get_owner(pfn_to_page(gmfn));
-    unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
+    struct domain *d = page_get_owner(mfn_to_page(gmfn));
+    unsigned long gpfn = mfn_to_gmfn(d, gmfn);
     unsigned long type = page->u.inuse.type_info & PGT_type_mask;
 
     SH_VVLOG("%s: free'ing smfn=%lx", __func__, smfn);
@@ -422,7 +422,7 @@
 
     default:
         printk("Free shadow weird page type mfn=%lx type=%" PRtype_info "\n",
-               page_to_pfn(page), page->u.inuse.type_info);
+               page_to_mfn(page), page->u.inuse.type_info);
         break;
     }
 
@@ -463,9 +463,9 @@
 static void inline
 release_out_of_sync_entry(struct domain *d, struct out_of_sync_entry *entry)
 {
-    struct pfn_info *page;
-
-    page = pfn_to_page(entry->gmfn);
+    struct page_info *page;
+
+    page = mfn_to_page(entry->gmfn);
 
     // Decrement ref count of guest & shadow pages
     //
@@ -698,7 +698,7 @@
     /* Now free the pre-zero'ed pages from the domain */
     list_for_each_safe(list_ent, tmp, &d->arch.free_shadow_frames)
     {
-        struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
+        struct page_info *page = list_entry(list_ent, struct page_info, list);
 
         list_del(list_ent);
         perfc_decr(free_l1_pages);
@@ -724,7 +724,7 @@
 {
     unsigned long mmfn;
     l2_pgentry_t *mpl2e;
-    struct pfn_info *mmfn_info;
+    struct page_info *mmfn_info;
     struct domain *d = v->domain;
     int i;
 
@@ -733,7 +733,7 @@
     mmfn_info = alloc_domheap_page(NULL);
     ASSERT(mmfn_info != NULL);
 
-    mmfn = page_to_pfn(mmfn_info);
+    mmfn = page_to_mfn(mmfn_info);
     mpl2e = (l2_pgentry_t *)map_domain_page_global(mmfn);
     memset(mpl2e, 0, PAGE_SIZE);
 
@@ -797,7 +797,7 @@
      */
     mfn = pagetable_get_pfn(v->arch.monitor_table);
     unmap_domain_page_global(v->arch.monitor_vtable);
-    free_domheap_page(pfn_to_page(mfn));
+    free_domheap_page(mfn_to_page(mfn));
 
     v->arch.monitor_table = mk_pagetable(0);
     v->arch.monitor_vtable = 0;
@@ -811,7 +811,7 @@
     unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
     l2_pgentry_t *l2, l2e;
     l1_pgentry_t *l1;
-    struct pfn_info *l1page;
+    struct page_info *l1page;
     unsigned long va = pfn << PAGE_SHIFT;
 
     ASSERT(tabpfn != 0);
@@ -828,7 +828,7 @@
             return 0;
         }
 
-        l1 = map_domain_page_with_cache(page_to_pfn(l1page), l1cache);
+        l1 = map_domain_page_with_cache(page_to_mfn(l1page), l1cache);
         memset(l1, 0, PAGE_SIZE);
         unmap_domain_page_with_cache(l1, l1cache);
 
@@ -848,7 +848,7 @@
 alloc_p2m_table(struct domain *d)
 {
     struct list_head *list_ent;
-    struct pfn_info *page, *l2page;
+    struct page_info *page, *l2page;
     l2_pgentry_t *l2;
     unsigned long mfn, pfn;
     struct domain_mmap_cache l1cache, l2cache;
@@ -860,16 +860,16 @@
     domain_mmap_cache_init(&l1cache);
     domain_mmap_cache_init(&l2cache);
 
-    d->arch.phys_table = mk_pagetable(page_to_phys(l2page));
-    l2 = map_domain_page_with_cache(page_to_pfn(l2page), &l2cache);
+    d->arch.phys_table = mk_pagetable(page_to_maddr(l2page));
+    l2 = map_domain_page_with_cache(page_to_mfn(l2page), &l2cache);
     memset(l2, 0, PAGE_SIZE);
     unmap_domain_page_with_cache(l2, &l2cache);
 
     list_ent = d->page_list.next;
     while ( list_ent != &d->page_list )
     {
-        page = list_entry(list_ent, struct pfn_info, list);
-        mfn = page_to_pfn(page);
+        page = list_entry(list_ent, struct page_info, list);
+        mfn = page_to_mfn(page);
         pfn = get_pfn_from_mfn(mfn);
         ASSERT(pfn != INVALID_M2P_ENTRY);
         ASSERT(pfn < (1u<<20));
@@ -882,8 +882,8 @@
     list_ent = d->xenpage_list.next;
     while ( list_ent != &d->xenpage_list )
     {
-        page = list_entry(list_ent, struct pfn_info, list);
-        mfn = page_to_pfn(page);
+        page = list_entry(list_ent, struct page_info, list);
+        mfn = page_to_mfn(page);
         pfn = get_pfn_from_mfn(mfn);
         if ( (pfn != INVALID_M2P_ENTRY) &&
              (pfn < (1u<<20)) )
@@ -1020,7 +1020,7 @@
         {
             // external guests provide their own memory for their P2M maps.
             //
-            ASSERT(d == page_get_owner(pfn_to_page(pagetable_get_pfn(
+            ASSERT(d == page_get_owner(mfn_to_page(pagetable_get_pfn(
                 d->arch.phys_table))));
         }
     }
@@ -1034,7 +1034,7 @@
     if ( shadow_mode_refcounts(d) )
     {
         struct list_head *list_ent; 
-        struct pfn_info *page;
+        struct page_info *page;
 
         /*
          * Tear down its counts by disassembling its page-table-based refcounts
@@ -1062,7 +1062,7 @@
         for (list_ent = d->page_list.next; list_ent != &d->page_list; 
              list_ent = page->list.next) {
             
-            page = list_entry(list_ent, struct pfn_info, list);
+            page = list_entry(list_ent, struct page_info, list);
 
             if ( !get_page_type(page, PGT_writable_page) )
                 BUG();
@@ -1122,7 +1122,7 @@
              (l1e_get_flags(l1[i]) & _PAGE_PRESENT) )
         {
             unsigned long mfn = l1e_get_pfn(l1[i]);
-            unsigned long gpfn = __mfn_to_gpfn(d, mfn);
+            unsigned long gpfn = mfn_to_gmfn(d, mfn);
             ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
             l1[i] = l1e_from_pfn(gpfn, l1e_get_flags(l1[i]));
         }
@@ -1150,7 +1150,7 @@
              (l2e_get_flags(l2[i]) & _PAGE_PRESENT) )
         {
             unsigned long mfn = l2e_get_pfn(l2[i]);
-            unsigned long gpfn = __mfn_to_gpfn(d, mfn);
+            unsigned long gpfn = mfn_to_gmfn(d, mfn);
             ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
             l2[i] = l2e_from_pfn(gpfn, l2e_get_flags(l2[i]));
             translate_l1pgtable(d, p2m, mfn);
@@ -1554,7 +1554,7 @@
 
         for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
             spl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
-            l2e_from_page(virt_to_page(page_get_owner(pfn_to_page(gmfn))->
+            l2e_from_page(virt_to_page(page_get_owner(mfn_to_page(gmfn))->
                                        arch.mm_perdomain_pt) + i,
                           __PAGE_HYPERVISOR);
 
@@ -1611,7 +1611,7 @@
         /* This L1 is NOT already shadowed so we need to shadow it. */
         SH_VVLOG("4a: l1 not shadowed");
 
-        gl1mfn = __gpfn_to_mfn(d, gl1pfn);
+        gl1mfn = gmfn_to_mfn(d, gl1pfn);
         if ( unlikely(!VALID_MFN(gl1mfn)) )
         {
             // Attempt to use an invalid pfn as an L1 page.
@@ -1687,7 +1687,7 @@
             set_guest_back_ptr(d, sl1e, sl1mfn, i);
         }
 
-        pfn_to_page(sl1mfn)->tlbflush_timestamp =
+        mfn_to_page(sl1mfn)->tlbflush_timestamp =
             SHADOW_ENCODE_MIN_MAX(min, max);
     }
 }
@@ -1770,7 +1770,7 @@
     u32 min_max = 0;
     int min, max, length;
 
-    if ( test_and_set_bit(_PGC_out_of_sync, &pfn_to_page(gmfn)->count_info) )
+    if ( test_and_set_bit(_PGC_out_of_sync, &mfn_to_page(gmfn)->count_info) )
     {
         ASSERT(__shadow_status(d, gpfn, PGT_snapshot));
         return SHADOW_SNAPSHOT_ELSEWHERE;
@@ -1791,8 +1791,8 @@
 
     if ( shadow_mode_refcounts(d) &&
          (shadow_max_pgtable_type(d, gpfn, &sl1mfn) == PGT_l1_shadow) )
-        min_max = pfn_to_page(sl1mfn)->tlbflush_timestamp;
-    pfn_to_page(smfn)->tlbflush_timestamp = min_max;
+        min_max = mfn_to_page(sl1mfn)->tlbflush_timestamp;
+    mfn_to_page(smfn)->tlbflush_timestamp = min_max;
 
     min = SHADOW_MIN(min_max);
     max = SHADOW_MAX(min_max);
@@ -1821,7 +1821,7 @@
 
     // Clear the out_of_sync bit.
     //
-    clear_bit(_PGC_out_of_sync, &pfn_to_page(entry->gmfn)->count_info);
+    clear_bit(_PGC_out_of_sync, &mfn_to_page(entry->gmfn)->count_info);
 
     // XXX Need to think about how to protect the domain's
     // information less expensively.
@@ -1838,11 +1838,11 @@
                              unsigned long mfn)
 {
     struct domain *d = v->domain;
-    struct pfn_info *page = pfn_to_page(mfn);
+    struct page_info *page = mfn_to_page(mfn);
     struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
 
     ASSERT(shadow_lock_is_acquired(d));
-    ASSERT(pfn_valid(mfn));
+    ASSERT(mfn_valid(mfn));
 
 #ifndef NDEBUG
     {
@@ -1995,7 +1995,7 @@
 {
     struct domain *d = v->domain;
     unsigned long l2mfn = pagetable_get_pfn(v->arch.guest_table);
-    unsigned long l2pfn = __mfn_to_gpfn(d, l2mfn);
+    unsigned long l2pfn = mfn_to_gmfn(d, l2mfn);
     l2_pgentry_t l2e;
     unsigned long l1pfn, l1mfn;
 
@@ -2004,7 +2004,7 @@
 
     perfc_incrc(shadow_out_of_sync_calls);
 
-    if ( page_out_of_sync(pfn_to_page(l2mfn)) &&
+    if ( page_out_of_sync(mfn_to_page(l2mfn)) &&
          !snapshot_entry_matches(d, (l1_pgentry_t *)v->arch.guest_vtable,
                                  l2pfn, l2_table_offset(va)) )
         return 1;
@@ -2014,13 +2014,13 @@
         return 0;
 
     l1pfn = l2e_get_pfn(l2e);
-    l1mfn = __gpfn_to_mfn(d, l1pfn);
+    l1mfn = gmfn_to_mfn(d, l1pfn);
 
     // If the l1 pfn is invalid, it can't be out of sync...
     if ( !VALID_MFN(l1mfn) )
         return 0;
 
-    if ( page_out_of_sync(pfn_to_page(l1mfn)) &&
+    if ( page_out_of_sync(mfn_to_page(l1mfn)) &&
          !snapshot_entry_matches(
              d, &linear_pg_table[l1_linear_offset(va) & ~(L1_PAGETABLE_ENTRIES-1)],
              l1pfn, l1_table_offset(va)) )
@@ -2148,13 +2148,13 @@
     int i;
     u32 found = 0;
     int is_l1_shadow =
-        ((pfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
+        ((mfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
          PGT_l1_shadow);
 
     match = l1e_from_pfn(readonly_gmfn, flags);
 
     if ( shadow_mode_external(d) ) {
-        i = (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask) 
+        i = (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask) 
             >> PGT_va_shift;
 
         if ( (i >= 0 && i < L1_PAGETABLE_ENTRIES) &&
@@ -2192,7 +2192,7 @@
 
     // If it's not a writable page, then no writable refs can be outstanding.
     //
-    if ( (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
+    if ( (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
          PGT_writable_page )
     {
         perfc_incrc(remove_write_not_writable);
@@ -2202,7 +2202,7 @@
     // How many outstanding writable PTEs for this page are there?
     //
     write_refs =
-        (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
+        (mfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
     if ( write_refs && MFN_PINNED(readonly_gmfn) )
     {
         write_refs--;
@@ -2220,7 +2220,7 @@
 
          // Use the back pointer to locate the shadow page that can contain
          // the PTE of interest
-         if ( (predicted_smfn = pfn_to_page(readonly_gmfn)->tlbflush_timestamp) ) {
+         if ( (predicted_smfn = mfn_to_page(readonly_gmfn)->tlbflush_timestamp) ) {
              found += remove_all_write_access_in_ptpage(
                  d, predicted_smfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, 0);
              if ( found == write_refs )
@@ -2261,7 +2261,7 @@
     int i;
     u32 count = 0;
     int is_l1_shadow =
-        ((pfn_to_page(l1mfn)->u.inuse.type_info & PGT_type_mask) ==
+        ((mfn_to_page(l1mfn)->u.inuse.type_info & PGT_type_mask) ==
          PGT_l1_shadow);
 
     match = l1e_from_pfn(forbidden_gmfn, flags);
@@ -2278,7 +2278,7 @@
         if ( is_l1_shadow )
             shadow_put_page_from_l1e(ol2e, d);
         else /* must be an hl2 page */
-            put_page(pfn_to_page(forbidden_gmfn));
+            put_page(mfn_to_page(forbidden_gmfn));
     }
 
     unmap_domain_page(pl1e);
@@ -2361,7 +2361,7 @@
             // the new contents of the guest page iff this it has the right
             // page type.
             //
-            if ( stype != ( pfn_to_page(entry->gmfn)->u.inuse.type_info & PGT_type_mask) )
+            if ( stype != ( mfn_to_page(entry->gmfn)->u.inuse.type_info & PGT_type_mask) )
                 continue;
         }
 
@@ -2398,12 +2398,12 @@
             if ( !smfn )
                 break;
 
-            min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
+            min_max_shadow = mfn_to_page(smfn)->tlbflush_timestamp;
             min_shadow     = SHADOW_MIN(min_max_shadow);
             max_shadow     = SHADOW_MAX(min_max_shadow);
 
             min_max_snapshot =
-                pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
+                mfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
             min_snapshot     = SHADOW_MIN(min_max_snapshot);
             max_snapshot     = SHADOW_MAX(min_max_snapshot);
 
@@ -2754,7 +2754,7 @@
             domain_crash_synchronous();
         }
 
-        __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
+        __mark_dirty(d, gmfn_to_mfn(d, l2e_get_pfn(gpde)));
     }
 
     shadow_set_l1e(va, spte, 1);
@@ -2913,7 +2913,7 @@
 {
     struct domain *d = v->domain;
     unsigned long gmfn = pagetable_get_pfn(v->arch.guest_table);
-    unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
+    unsigned long gpfn = mfn_to_gmfn(d, gmfn);
     unsigned long smfn, hl2mfn, old_smfn;
     int need_sync = 0;
 
@@ -3173,7 +3173,7 @@
         FAIL("global bit set in shadow");
 
     eff_guest_pfn = l1e_get_pfn(eff_guest_pte);
-    eff_guest_mfn = __gpfn_to_mfn(d, eff_guest_pfn);
+    eff_guest_mfn = gmfn_to_mfn(d, eff_guest_pfn);
     shadow_mfn = l1e_get_pfn(shadow_pte);
 
     if ( !VALID_MFN(eff_guest_mfn) && !shadow_mode_refcounts(d) )
@@ -3190,7 +3190,7 @@
     {
         printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=%lx 
page_table_page=%d\n",
                eff_guest_pfn, eff_guest_mfn, shadow_mfn,
-               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
+               mfn_to_page(eff_guest_mfn)->u.inuse.type_info,
                page_table_page);
         FAIL("RW coherence");
     }
@@ -3201,7 +3201,7 @@
     {
         printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=%lx 
page_table_page=%d\n",
                eff_guest_pfn, eff_guest_mfn, shadow_mfn,
-               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
+               mfn_to_page(eff_guest_mfn)->u.inuse.type_info,
                page_table_page);
         FAIL("RW2 coherence");
     }
@@ -3241,7 +3241,7 @@
     l1_pgentry_t *p_guest, *p_shadow, *p_snapshot = NULL;
     int errors = 0;
 
-    if ( page_out_of_sync(pfn_to_page(gmfn)) )
+    if ( page_out_of_sync(mfn_to_page(gmfn)) )
     {
         snapshot_mfn = __shadow_status(d, gpfn, PGT_snapshot);
         ASSERT(snapshot_mfn);
@@ -3281,13 +3281,13 @@
     int errors = 0;
     int limit;
 
-    if ( !oos_pdes && (page_get_owner(pfn_to_page(gmfn)) != d) )
+    if ( !oos_pdes && (page_get_owner(mfn_to_page(gmfn)) != d) )
         FAILPT("domain doesn't own page");
-    if ( oos_pdes && (page_get_owner(pfn_to_page(gmfn)) != NULL) )
+    if ( oos_pdes && (page_get_owner(mfn_to_page(gmfn)) != NULL) )
         FAILPT("bogus owner for snapshot page");
-    if ( page_get_owner(pfn_to_page(smfn)) != NULL )
+    if ( page_get_owner(mfn_to_page(smfn)) != NULL )
         FAILPT("shadow page mfn=0x%lx is owned by someone, domid=%d",
-               smfn, page_get_owner(pfn_to_page(smfn))->domain_id);
+               smfn, page_get_owner(mfn_to_page(smfn))->domain_id);
 
 #if 0
     if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
@@ -3375,14 +3375,14 @@
     perfc_incrc(check_pagetable);
 
     ptbase_mfn = gptbase >> PAGE_SHIFT;
-    ptbase_pfn = __mfn_to_gpfn(d, ptbase_mfn);
+    ptbase_pfn = mfn_to_gmfn(d, ptbase_mfn);
 
     if ( !(smfn = __shadow_status(d, ptbase_pfn, PGT_base_page_table)) )
     {
         printk("%s-PT %lx not shadowed\n", s, gptbase);
         goto out;
     }
-    if ( page_out_of_sync(pfn_to_page(ptbase_mfn)) )
+    if ( page_out_of_sync(mfn_to_page(ptbase_mfn)) )
     {
         ptbase_mfn = __shadow_status(d, ptbase_pfn, PGT_snapshot);
         oos_pdes = 1;
@@ -3403,7 +3403,7 @@
     for ( i = 0; i < limit; i++ )
     {
         unsigned long gl1pfn = l2e_get_pfn(gpl2e[i]);
-        unsigned long gl1mfn = __gpfn_to_mfn(d, gl1pfn);
+        unsigned long gl1mfn = gmfn_to_mfn(d, gl1pfn);
         unsigned long sl1mfn = l2e_get_pfn(spl2e[i]);
 
         if ( l2e_get_intpte(spl2e[i]) != 0 )  /* FIXME: check flags? */
@@ -3444,7 +3444,7 @@
         a = &d->arch.shadow_ht[i];
         while ( a && a->gpfn_and_flags )
         {
-            gmfn = __gpfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
+            gmfn = gmfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
 
             switch ( a->gpfn_and_flags & PGT_type_mask )
             {
@@ -3454,7 +3454,7 @@
                 break;
             case PGT_l2_shadow:
                 errors += check_l2_table(v, gmfn, a->smfn,
-                                         page_out_of_sync(pfn_to_page(gmfn)));
+                                         page_out_of_sync(mfn_to_page(gmfn)));
                 break;
             case PGT_l3_shadow:
             case PGT_l4_shadow:
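
The shadow checks in the hunks above all share one access pattern: fetch the page_info for a machine frame with mfn_to_page() and mask u.inuse.type_info. A rough sketch of that pattern, for orientation only (the mask names are as used above; their exact values are presumably defined in the x86 mm headers and are not part of this patch):

    /* Sketch only: u.inuse.type_info packs a type tag and a use count. */
    static inline int is_writable_page(unsigned long mfn)
    {
        struct page_info *page = mfn_to_page(mfn);   /* frame_table + mfn */
        return (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page;
    }

    static inline unsigned long writable_refs(unsigned long mfn)
    {
        return mfn_to_page(mfn)->u.inuse.type_info & PGT_count_mask;
    }
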
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c      Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/shadow_public.c      Wed Feb  1 15:28:50 2006
@@ -170,14 +170,14 @@
 #if CONFIG_PAGING_LEVELS >=3
     if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
     {
-        struct pfn_info *page = pfn_to_page(smfn);
+        struct page_info *page = mfn_to_page(smfn);
         for ( i = 0; i < PDP_ENTRIES; i++ )
         {
             if ( entry_get_flags(ple[i]) & _PAGE_PRESENT )
                 free_fake_shadow_l2(d,entry_get_pfn(ple[i]));
         }
 
-        page = pfn_to_page(entry_get_pfn(ple[0]));
+        page = mfn_to_page(entry_get_pfn(ple[0]));
         free_domheap_pages(page, SL2_ORDER);
         unmap_domain_page(ple);
     }
@@ -210,7 +210,7 @@
                     break;
                 if ( level == PAGING_L2 )
                 {
-                    struct pfn_info *page = pfn_to_page(smfn);
+                    struct page_info *page = mfn_to_page(smfn);
                     if ( is_xen_l2_slot(page->u.inuse.type_info, i) )
                         continue;
                 }
@@ -234,7 +234,7 @@
  */
 static pagetable_t page_table_convert(struct domain *d)
 {
-    struct pfn_info *l4page, *l3page;
+    struct page_info *l4page, *l3page;
     l4_pgentry_t *l4;
     l3_pgentry_t *l3, *pae_l3;
     int i;
@@ -242,13 +242,13 @@
     l4page = alloc_domheap_page(NULL);
     if (l4page == NULL)
         domain_crash_synchronous();
-    l4 = map_domain_page(page_to_pfn(l4page));
+    l4 = map_domain_page(page_to_mfn(l4page));
     memset(l4, 0, PAGE_SIZE);
 
     l3page = alloc_domheap_page(NULL);
     if (l3page == NULL)
         domain_crash_synchronous();
-    l3 = map_domain_page(page_to_pfn(l3page));
+    l3 = map_domain_page(page_to_mfn(l3page));
     memset(l3, 0, PAGE_SIZE);
 
     l4[0] = l4e_from_page(l3page, __PAGE_HYPERVISOR);
@@ -261,14 +261,14 @@
     unmap_domain_page(l4);
     unmap_domain_page(l3);
 
-    return mk_pagetable(page_to_phys(l4page));
+    return mk_pagetable(page_to_maddr(l4page));
 }
 
 static void alloc_monitor_pagetable(struct vcpu *v)
 {
     unsigned long mmfn;
     l4_pgentry_t *mpl4e;
-    struct pfn_info *mmfn_info;
+    struct page_info *mmfn_info;
     struct domain *d = v->domain;
     pagetable_t phys_table;
 
@@ -277,7 +277,7 @@
     mmfn_info = alloc_domheap_page(NULL);
     ASSERT( mmfn_info );
 
-    mmfn = page_to_pfn(mmfn_info);
+    mmfn = page_to_mfn(mmfn_info);
     mpl4e = (l4_pgentry_t *) map_domain_page_global(mmfn);
     memcpy(mpl4e, &idle_pg_table[0], PAGE_SIZE);
     mpl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
@@ -302,7 +302,7 @@
      */
     mfn = pagetable_get_pfn(v->arch.monitor_table);
     unmap_domain_page_global(v->arch.monitor_vtable);
-    free_domheap_page(pfn_to_page(mfn));
+    free_domheap_page(mfn_to_page(mfn));
 
     v->arch.monitor_table = mk_pagetable(0);
     v->arch.monitor_vtable = 0;
@@ -326,7 +326,7 @@
 {
     unsigned long mmfn;
     l2_pgentry_t *mpl2e;
-    struct pfn_info *mmfn_info;
+    struct page_info *mmfn_info;
     struct domain *d = v->domain;
     int i;
 
@@ -335,7 +335,7 @@
     mmfn_info = alloc_domheap_page(NULL);
     ASSERT(mmfn_info != NULL);
 
-    mmfn = page_to_pfn(mmfn_info);
+    mmfn = page_to_mfn(mmfn_info);
     mpl2e = (l2_pgentry_t *)map_domain_page_global(mmfn);
     memset(mpl2e, 0, PAGE_SIZE);
 
@@ -399,7 +399,7 @@
      */
     mfn = pagetable_get_pfn(v->arch.monitor_table);
     unmap_domain_page_global(v->arch.monitor_vtable);
-    free_domheap_page(pfn_to_page(mfn));
+    free_domheap_page(mfn_to_page(mfn));
 
     v->arch.monitor_table = mk_pagetable(0);
     v->arch.monitor_vtable = 0;
@@ -416,7 +416,7 @@
 
     // Clear the out_of_sync bit.
     //
-    clear_bit(_PGC_out_of_sync, &pfn_to_page(entry->gmfn)->count_info);
+    clear_bit(_PGC_out_of_sync, &mfn_to_page(entry->gmfn)->count_info);
 
     // XXX Need to think about how to protect the domain's
     // information less expensively.
@@ -431,9 +431,9 @@
 void
 release_out_of_sync_entry(struct domain *d, struct out_of_sync_entry *entry)
 {
-    struct pfn_info *page;
-
-    page = pfn_to_page(entry->gmfn);
+    struct page_info *page;
+
+    page = mfn_to_page(entry->gmfn);
         
     // Decrement ref count of guest & shadow pages
     //
@@ -506,13 +506,13 @@
     if ( !shadow_mode_refcounts(d) )
         return;
 
-    ASSERT(pfn_to_page(gmfn)->count_info & PGC_page_table);
+    ASSERT(mfn_to_page(gmfn)->count_info & PGC_page_table);
 
     if ( shadow_max_pgtable_type(d, gpfn, NULL) == PGT_none )
     {
-        clear_bit(_PGC_page_table, &pfn_to_page(gmfn)->count_info);
-
-        if ( page_out_of_sync(pfn_to_page(gmfn)) )
+        clear_bit(_PGC_page_table, &mfn_to_page(gmfn)->count_info);
+
+        if ( page_out_of_sync(mfn_to_page(gmfn)) )
         {
             remove_out_of_sync_entries(d, gmfn);
         }
@@ -524,7 +524,7 @@
 {
     l1_pgentry_t *pl1e = map_domain_page(smfn);
     int i;
-    struct pfn_info *spage = pfn_to_page(smfn);
+    struct page_info *spage = mfn_to_page(smfn);
     u32 min_max = spage->tlbflush_timestamp;
     int min = SHADOW_MIN(min_max);
     int max;
@@ -561,7 +561,7 @@
     for ( i = 0; i < limit; i++ )
     {
         if ( l1e_get_flags(hl2[i]) & _PAGE_PRESENT )
-            put_page(pfn_to_page(l1e_get_pfn(hl2[i])));
+            put_page(mfn_to_page(l1e_get_pfn(hl2[i])));
     }
 
     unmap_domain_page(hl2);
@@ -605,11 +605,11 @@
 
 void free_shadow_page(unsigned long smfn)
 {
-    struct pfn_info *page = pfn_to_page(smfn);
+    struct page_info *page = mfn_to_page(smfn);
 
     unsigned long gmfn = page->u.inuse.type_info & PGT_mfn_mask;
-    struct domain *d = page_get_owner(pfn_to_page(gmfn));
-    unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
+    struct domain *d = page_get_owner(mfn_to_page(gmfn));
+    unsigned long gpfn = mfn_to_gmfn(d, gmfn);
     unsigned long type = page->u.inuse.type_info & PGT_type_mask;
 
     SH_VVLOG("%s: free'ing smfn=%lx", __func__, smfn);
@@ -670,7 +670,7 @@
 
     default:
         printk("Free shadow weird page type mfn=%lx type=%" PRtype_info "\n",
-               page_to_pfn(page), page->u.inuse.type_info);
+               page_to_mfn(page), page->u.inuse.type_info);
         break;
     }
 
@@ -885,7 +885,7 @@
     /* Now free the pre-zero'ed pages from the domain. */
     list_for_each_safe(list_ent, tmp, &d->arch.free_shadow_frames)
     {
-        struct pfn_info *page = list_entry(list_ent, struct pfn_info, list);
+        struct page_info *page = list_entry(list_ent, struct page_info, list);
 
         list_del(list_ent);
         perfc_decr(free_l1_pages);
@@ -1072,7 +1072,7 @@
         {
             // external guests provide their own memory for their P2M maps.
             //
-            ASSERT(d == page_get_owner(pfn_to_page(pagetable_get_pfn(
+            ASSERT(d == page_get_owner(mfn_to_page(pagetable_get_pfn(
                 d->arch.phys_table))));
         }
     }
@@ -1086,7 +1086,7 @@
     if ( shadow_mode_refcounts(d) )
     {
         struct list_head *list_ent; 
-        struct pfn_info *page;
+        struct page_info *page;
 
         /*
          * Tear down its counts by disassembling its page-table-based refcounts
@@ -1114,7 +1114,7 @@
         for (list_ent = d->page_list.next; list_ent != &d->page_list; 
              list_ent = page->list.next) {
             
-            page = list_entry(list_ent, struct pfn_info, list);
+            page = list_entry(list_ent, struct page_info, list);
             if ( !get_page_type(page, PGT_writable_page) )
                 BUG();
             put_page_type(page);
@@ -1339,7 +1339,7 @@
     unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
     l2_pgentry_t *l2, l2e;
     l1_pgentry_t *l1;
-    struct pfn_info *l1page;
+    struct page_info *l1page;
     unsigned long va = pfn << PAGE_SHIFT;
 
     ASSERT(tabpfn != 0);
@@ -1355,7 +1355,7 @@
             return 0;
         }
 
-        l1 = map_domain_page_with_cache(page_to_pfn(l1page), l1cache);
+        l1 = map_domain_page_with_cache(page_to_mfn(l1page), l1cache);
         memset(l1, 0, PAGE_SIZE);
         unmap_domain_page_with_cache(l1, l1cache);
 
@@ -1375,7 +1375,7 @@
 alloc_p2m_table(struct domain *d)
 {
     struct list_head *list_ent;
-    struct pfn_info *page, *l2page;
+    struct page_info *page, *l2page;
     l2_pgentry_t *l2;
     unsigned long mfn, pfn;
     struct domain_mmap_cache l1cache, l2cache;
@@ -1387,16 +1387,16 @@
     domain_mmap_cache_init(&l1cache);
     domain_mmap_cache_init(&l2cache);
 
-    d->arch.phys_table = mk_pagetable(page_to_phys(l2page));
-    l2 = map_domain_page_with_cache(page_to_pfn(l2page), &l2cache);
+    d->arch.phys_table = mk_pagetable(page_to_maddr(l2page));
+    l2 = map_domain_page_with_cache(page_to_mfn(l2page), &l2cache);
     memset(l2, 0, PAGE_SIZE);
     unmap_domain_page_with_cache(l2, &l2cache);
 
     list_ent = d->page_list.next;
     while ( list_ent != &d->page_list )
     {
-        page = list_entry(list_ent, struct pfn_info, list);
-        mfn = page_to_pfn(page);
+        page = list_entry(list_ent, struct page_info, list);
+        mfn = page_to_mfn(page);
         pfn = get_pfn_from_mfn(mfn);
         ASSERT(pfn != INVALID_M2P_ENTRY);
         ASSERT(pfn < (1u<<20));
@@ -1409,8 +1409,8 @@
     list_ent = d->xenpage_list.next;
     while ( list_ent != &d->xenpage_list )
     {
-        page = list_entry(list_ent, struct pfn_info, list);
-        mfn = page_to_pfn(page);
+        page = list_entry(list_ent, struct page_info, list);
+        mfn = page_to_mfn(page);
         pfn = get_pfn_from_mfn(mfn);
         if ( (pfn != INVALID_M2P_ENTRY) &&
              (pfn < (1u<<20)) )
@@ -1429,7 +1429,7 @@
 
 void shadow_l1_normal_pt_update(
     struct domain *d,
-    physaddr_t pa, l1_pgentry_t gpte,
+    paddr_t pa, l1_pgentry_t gpte,
     struct domain_mmap_cache *cache)
 {
     unsigned long sl1mfn;    
@@ -1454,7 +1454,7 @@
 
 void shadow_l2_normal_pt_update(
     struct domain *d,
-    physaddr_t pa, l2_pgentry_t gpde,
+    paddr_t pa, l2_pgentry_t gpde,
     struct domain_mmap_cache *cache)
 {
     unsigned long sl2mfn;
@@ -1479,7 +1479,7 @@
 #if CONFIG_PAGING_LEVELS >= 3
 void shadow_l3_normal_pt_update(
     struct domain *d,
-    physaddr_t pa, l3_pgentry_t l3e,
+    paddr_t pa, l3_pgentry_t l3e,
     struct domain_mmap_cache *cache)
 {
     unsigned long sl3mfn;
@@ -1506,7 +1506,7 @@
 #if CONFIG_PAGING_LEVELS >= 4
 void shadow_l4_normal_pt_update(
     struct domain *d,
-    physaddr_t pa, l4_pgentry_t l4e,
+    paddr_t pa, l4_pgentry_t l4e,
     struct domain_mmap_cache *cache)
 {
     unsigned long sl4mfn;
@@ -1543,7 +1543,7 @@
              (l1e_get_flags(l1[i]) & _PAGE_PRESENT) )
         {
             unsigned long mfn = l1e_get_pfn(l1[i]);
-            unsigned long gpfn = __mfn_to_gpfn(d, mfn);
+            unsigned long gpfn = mfn_to_gmfn(d, mfn);
             ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
             l1[i] = l1e_from_pfn(gpfn, l1e_get_flags(l1[i]));
         }
@@ -1571,7 +1571,7 @@
              (l2e_get_flags(l2[i]) & _PAGE_PRESENT) )
         {
             unsigned long mfn = l2e_get_pfn(l2[i]);
-            unsigned long gpfn = __mfn_to_gpfn(d, mfn);
+            unsigned long gpfn = mfn_to_gmfn(d, mfn);
             ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
             l2[i] = l2e_from_pfn(gpfn, l2e_get_flags(l2[i]));
             translate_l1pgtable(d, p2m, mfn);
@@ -1648,7 +1648,7 @@
     int i;
     u32 count = 0;
     int is_l1_shadow =
-        ((pfn_to_page(l1mfn)->u.inuse.type_info & PGT_type_mask) ==
+        ((mfn_to_page(l1mfn)->u.inuse.type_info & PGT_type_mask) ==
          PGT_l1_shadow);
 
     match = l1e_from_pfn(forbidden_gmfn, flags);
@@ -1665,7 +1665,7 @@
         if ( is_l1_shadow )
             shadow_put_page_from_l1e(ol2e, d);
         else /* must be an hl2 page */
-            put_page(pfn_to_page(forbidden_gmfn));
+            put_page(mfn_to_page(forbidden_gmfn));
     }
 
     unmap_domain_page(pl1e);
@@ -1715,7 +1715,7 @@
 }
 
 void shadow_drop_references(
-    struct domain *d, struct pfn_info *page)
+    struct domain *d, struct page_info *page)
 {
     if ( likely(!shadow_mode_refcounts(d)) ||
          ((page->u.inuse.type_info & PGT_count_mask) == 0) )
@@ -1723,21 +1723,21 @@
 
     /* XXX This needs more thought... */
     printk("%s: needing to call __shadow_remove_all_access for mfn=%lx\n",
-           __func__, page_to_pfn(page));
-    printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
+           __func__, page_to_mfn(page));
+    printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_mfn(page),
            page->count_info, page->u.inuse.type_info);
 
     shadow_lock(d);
-    __shadow_remove_all_access(d, page_to_pfn(page));
+    __shadow_remove_all_access(d, page_to_mfn(page));
     shadow_unlock(d);
 
-    printk("After:  mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
+    printk("After:  mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_mfn(page),
            page->count_info, page->u.inuse.type_info);
 }
 
 /* XXX Needs more thought. Neither pretty nor fast: a place holder. */
 void shadow_sync_and_drop_references(
-    struct domain *d, struct pfn_info *page)
+    struct domain *d, struct page_info *page)
 {
     if ( likely(!shadow_mode_refcounts(d)) )
         return;
@@ -1745,9 +1745,9 @@
     shadow_lock(d);
 
     if ( page_out_of_sync(page) )
-        __shadow_sync_mfn(d, page_to_pfn(page));
-
-    __shadow_remove_all_access(d, page_to_pfn(page));
+        __shadow_sync_mfn(d, page_to_mfn(page));
+
+    __shadow_remove_all_access(d, page_to_mfn(page));
 
     shadow_unlock(d);
 }
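
For orientation, alloc_p2m_table() above populates a per-domain pseudophys-to-machine table rooted at d->arch.phys_table, one l1 entry per pfn. A hypothetical sketch of the matching lookup, mirroring the helper above that computes va = pfn << PAGE_SHIFT (the composition is assumed, not part of this patch):

    static unsigned long p2m_lookup(struct domain *d, unsigned long pfn)
    {
        unsigned long va = pfn << PAGE_SHIFT;
        unsigned long tabpfn = pagetable_get_pfn(d->arch.phys_table);
        unsigned long mfn = INVALID_MFN;
        l2_pgentry_t *l2 = map_domain_page(tabpfn);
        l2_pgentry_t l2e = l2[l2_table_offset(va)];

        unmap_domain_page(l2);
        if ( l2e_get_flags(l2e) & _PAGE_PRESENT )
        {
            l1_pgentry_t *l1 = map_domain_page(l2e_get_pfn(l2e));
            l1_pgentry_t l1e = l1[l1_table_offset(va)];
            if ( l1e_get_flags(l1e) & _PAGE_PRESENT )
                mfn = l1e_get_pfn(l1e);
            unmap_domain_page(l1);
        }
        return mfn;
    }
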
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/smpboot.c    Wed Feb  1 15:28:50 2006
@@ -98,7 +98,7 @@
 static unsigned long __init setup_trampoline(void)
 {
        memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
-       return virt_to_phys(trampoline_base);
+       return virt_to_maddr(trampoline_base);
 }
 
 /*
@@ -1038,7 +1038,7 @@
         */
        CMOS_WRITE(0, 0xf);
 
-       *((volatile long *) phys_to_virt(0x467)) = 0;
+       *((volatile long *) maddr_to_virt(0x467)) = 0;
 
 #ifdef BOGOMIPS
        /*
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/x86_32/mm.c  Wed Feb  1 15:28:50 2006
@@ -34,11 +34,11 @@
 
 static unsigned long mpt_size;
 
-struct pfn_info *alloc_xen_pagetable(void)
+struct page_info *alloc_xen_pagetable(void)
 {
     extern int early_boot;
     extern unsigned long xenheap_phys_start;
-    struct pfn_info *pg;
+    struct page_info *pg;
 
     if ( !early_boot )
     {
@@ -46,12 +46,12 @@
         return ((v == NULL) ? NULL : virt_to_page(v));
     }
 
-    pg = phys_to_page(xenheap_phys_start);
+    pg = maddr_to_page(xenheap_phys_start);
     xenheap_phys_start += PAGE_SIZE;
     return pg;
 }
 
-void free_xen_pagetable(struct pfn_info *pg)
+void free_xen_pagetable(struct page_info *pg)
 {
     free_xenheap_page(page_to_virt(pg));
 }
@@ -65,7 +65,7 @@
 {
     void *ioremap_pt;
     unsigned long v;
-    struct pfn_info *pg;
+    struct page_info *pg;
     int i;
 
 #ifdef CONFIG_X86_PAE
@@ -149,20 +149,20 @@
     unsigned int i, j;
 
     /*
-     * We are rather picky about the layout of 'struct pfn_info'. The
+     * We are rather picky about the layout of 'struct page_info'. The
      * count_info and domain fields must be adjacent, as we perform atomic
      * 64-bit operations on them. Also, just for sanity, we assert the size
      * of the structure here.
      */
-    if ( (offsetof(struct pfn_info, u.inuse._domain) != 
-          (offsetof(struct pfn_info, count_info) + sizeof(u32))) ||
-         ((offsetof(struct pfn_info, count_info) & 7) != 0) ||
-         (sizeof(struct pfn_info) != 24) )
-    {
-        printk("Weird pfn_info layout (%ld,%ld,%d)\n",
-               offsetof(struct pfn_info, count_info),
-               offsetof(struct pfn_info, u.inuse._domain),
-               sizeof(struct pfn_info));
+    if ( (offsetof(struct page_info, u.inuse._domain) != 
+          (offsetof(struct page_info, count_info) + sizeof(u32))) ||
+         ((offsetof(struct page_info, count_info) & 7) != 0) ||
+         (sizeof(struct page_info) != 24) )
+    {
+        printk("Weird page_info layout (%ld,%ld,%d)\n",
+               offsetof(struct page_info, count_info),
+               offsetof(struct page_info, u.inuse._domain),
+               sizeof(struct page_info));
         BUG();
     }
 
@@ -173,7 +173,7 @@
             idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i]);
         for ( j = 0; j < L2_PAGETABLE_ENTRIES; j++ )
         {
-            struct pfn_info *page = pfn_to_page(m2p_start_mfn + j);
+            struct page_info *page = mfn_to_page(m2p_start_mfn + j);
             page->count_info = PGC_allocated | 1;
             /* Ensure it's only mapped read-only by domains. */
             page->u.inuse.type_info = PGT_gdt_page | 1;
diff -r a12e08eb0209 -r 0c94043f5c5b xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Wed Feb  1 15:01:04 2006
+++ b/xen/arch/x86/x86_64/mm.c  Wed Feb  1 15:28:50 2006
@@ -30,7 +30,7 @@
 #include <asm/msr.h>
 #include <public/memory.h>
 
-struct pfn_info *alloc_xen_pagetable(void)
+struct page_info *alloc_xen_pagetable(void)
 {
     extern int early_boot;
     unsigned long pfn;
@@ -39,10 +39,10 @@
         return alloc_domheap_page(NULL);
 
     pfn = alloc_boot_pages(1, 1);
-    return ((pfn == 0) ? NULL : pfn_to_page(pfn));
-}
-
-void free_xen_pagetable(struct pfn_info *pg)
+    return ((pfn == 0) ? NULL : mfn_to_page(pfn));
+}
+
+void free_xen_pagetable(struct page_info *pg)
 {
     free_domheap_page(pg);
 }
@@ -78,7 +78,7 @@
     unsigned long i, mpt_size;
     l3_pgentry_t *l3_ro_mpt;
     l2_pgentry_t *l2_ro_mpt;
-    struct pfn_info *pg;
+    struct page_info *pg;
 
     idle_vcpu[0]->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
 
@@ -106,7 +106,7 @@
         if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
             panic("Not enough memory for m2p table\n");
         map_pages_to_xen(
-            RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), page_to_pfn(pg), 
+            RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), page_to_mfn(pg), 
             1UL << PAGETABLE_ORDER,
             PAGE_HYPERVISOR);
         memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), 0x55,
@@ -140,19 +140,19 @@
     l2_pgentry_t l2e;
 
     /*
-     * We are rather picky about the layout of 'struct pfn_info'. The
+     * We are rather picky about the layout of 'struct page_info'. The
      * count_info and domain fields must be adjacent, as we perform atomic
      * 64-bit operations on them.
      */
-    if ( ((offsetof(struct pfn_info, u.inuse._domain) != 
-           (offsetof(struct pfn_info, count_info) + sizeof(u32)))) ||
-         ((offsetof(struct pfn_info, count_info) & 7) != 0) ||
-         (sizeof(struct pfn_info) != 40) )
-    {
-        printk("Weird pfn_info layout (%ld,%ld,%ld)\n",
-               offsetof(struct pfn_info, count_info),
-               offsetof(struct pfn_info, u.inuse._domain),
-               sizeof(struct pfn_info));
+    if ( ((offsetof(struct page_info, u.inuse._domain) != 
+           (offsetof(struct page_info, count_info) + sizeof(u32)))) ||
+         ((offsetof(struct page_info, count_info) & 7) != 0) ||
+         (sizeof(struct page_info) != 40) )
+    {
+        printk("Weird page_info layout (%ld,%ld,%ld)\n",
+               offsetof(struct page_info, count_info),
+               offsetof(struct page_info, u.inuse._domain),
+               sizeof(struct page_info));
         for ( ; ; ) ;
     }
 
@@ -172,7 +172,7 @@
 
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
-            struct pfn_info *page = pfn_to_page(m2p_start_mfn + i);
+            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
             page->count_info = PGC_allocated | 1;
             /* gdt to make sure it's only mapped read-only by non-privileged
                domains. */
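
The "Weird page_info layout" checks above run at boot and hang or BUG() on failure. The same invariants can be stated at compile time; a sketch, assuming a BUILD_BUG_ON-style helper that is not part of this tree at this point:

    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    static inline void check_page_info_layout(void)
    {
        BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) !=
                     (offsetof(struct page_info, count_info) + sizeof(u32)));
        BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
        BUILD_BUG_ON(sizeof(struct page_info) != 40);  /* 24 on x86-32 */
    }
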
diff -r a12e08eb0209 -r 0c94043f5c5b xen/common/grant_table.c
--- a/xen/common/grant_table.c  Wed Feb  1 15:01:04 2006
+++ b/xen/common/grant_table.c  Wed Feb  1 15:28:50 2006
@@ -237,7 +237,7 @@
         if ( !act->pin )
         {
             act->domid = sdom;
-            act->frame = __gpfn_to_mfn(rd, sha->frame);
+            act->frame = gmfn_to_mfn(rd, sha->frame);
         }
     }
     else if ( (act->pin & 0x80808080U) != 0 )
@@ -254,10 +254,10 @@
     spin_unlock(&rd->grant_table->lock);
 
     frame = act->frame;
-    if ( unlikely(!pfn_valid(frame)) ||
+    if ( unlikely(!mfn_valid(frame)) ||
          unlikely(!((dev_hst_ro_flags & GNTMAP_readonly) ?
-                    get_page(pfn_to_page(frame), rd) :
-                    get_page_and_type(pfn_to_page(frame), rd,
+                    get_page(mfn_to_page(frame), rd) :
+                    get_page_and_type(mfn_to_page(frame), rd,
                                       PGT_writable_page))) )
         PIN_FAIL(undo_out, GNTST_general_error,
                  "Could not pin the granted frame (%lx)!\n", frame);
@@ -268,16 +268,16 @@
         if ( rc != GNTST_okay )
         {
             if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
-                put_page_type(pfn_to_page(frame));
-            put_page(pfn_to_page(frame));
+                put_page_type(mfn_to_page(frame));
+            put_page(mfn_to_page(frame));
             goto undo_out;
         }
 
         if ( dev_hst_ro_flags & GNTMAP_device_map )
         {
-            (void)get_page(pfn_to_page(frame), rd);
+            (void)get_page(mfn_to_page(frame), rd);
             if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
-                get_page_type(pfn_to_page(frame), PGT_writable_page);
+                get_page_type(mfn_to_page(frame), PGT_writable_page);
         }
     }
 
@@ -407,12 +407,12 @@
             if ( flags & GNTMAP_readonly )
             {
                 act->pin -= GNTPIN_devr_inc;
-                put_page(pfn_to_page(frame));
+                put_page(mfn_to_page(frame));
             }
             else
             {
                 act->pin -= GNTPIN_devw_inc;
-                put_page_and_type(pfn_to_page(frame));
+                put_page_and_type(mfn_to_page(frame));
             }
         }
     }
@@ -427,12 +427,12 @@
         if ( flags & GNTMAP_readonly )
         {
             act->pin -= GNTPIN_hstr_inc;
-            put_page(pfn_to_page(frame));
+            put_page(mfn_to_page(frame));
         }
         else
         {
             act->pin -= GNTPIN_hstw_inc;
-            put_page_and_type(pfn_to_page(frame));
+            put_page_and_type(mfn_to_page(frame));
         }
     }
 
@@ -481,7 +481,7 @@
     gnttab_setup_table_t  op;
     struct domain        *d;
     int                   i;
-    unsigned long         gpfn;
+    unsigned long         gmfn;
 
     if ( count != 1 )
         return -EINVAL;
@@ -523,8 +523,8 @@
         (void)put_user(GNTST_okay, &uop->status);
         for ( i = 0; i < op.nr_frames; i++ )
         {
-            gpfn = gnttab_shared_gpfn(d, d->grant_table, i);
-            (void)put_user(gpfn, &op.frame_list[i]);
+            gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
+            (void)put_user(gmfn, &op.frame_list[i]);
         }
     }
 
@@ -568,7 +568,7 @@
     gt = d->grant_table;
     (void)put_user(GNTST_okay, &uop->status);
 
-    shared_mfn = virt_to_phys(d->grant_table->shared);
+    shared_mfn = virt_to_maddr(d->grant_table->shared);
 
     DPRINTK("Grant table for dom (%hu) MFN (%x)\n",
             op.dom, shared_mfn);
@@ -706,7 +706,7 @@
 {
     struct domain *d = current->domain;
     struct domain *e;
-    struct pfn_info *page;
+    struct page_info *page;
     int i;
     grant_entry_t *sha;
     gnttab_transfer_t gop;
@@ -723,7 +723,7 @@
         }
 
         /* Check the passed page frame for basic validity. */
-        if ( unlikely(!pfn_valid(gop.mfn)) )
+        if ( unlikely(!mfn_valid(gop.mfn)) )
         { 
             DPRINTK("gnttab_transfer: out-of-range %lx\n",
                     (unsigned long)gop.mfn);
@@ -731,8 +731,8 @@
             continue;
         }
 
-        mfn = __gpfn_to_mfn(d, gop.mfn);
-        page = pfn_to_page(mfn);
+        mfn = gmfn_to_mfn(d, gop.mfn);
+        page = mfn_to_page(mfn);
         if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
         { 
             DPRINTK("gnttab_transfer: xen frame %lx\n",
@@ -895,7 +895,7 @@
     memset(t->shared, 0, NR_GRANT_FRAMES * PAGE_SIZE);
 
     for ( i = 0; i < NR_GRANT_FRAMES; i++ )
-        gnttab_create_shared_mfn(d, t, i);
+        gnttab_create_shared_page(d, t, i);
 
     /* Okay, install the structure. */
     wmb(); /* avoid races with lock-free access to d->grant_table */
@@ -952,7 +952,7 @@
             {
                 BUG_ON(!(act->pin & GNTPIN_devr_mask));
                 act->pin -= GNTPIN_devr_inc;
-                put_page(pfn_to_page(act->frame));
+                put_page(mfn_to_page(act->frame));
             }
 
             if ( map->ref_and_flags & GNTMAP_host_map )
@@ -960,7 +960,7 @@
                 BUG_ON(!(act->pin & GNTPIN_hstr_mask));
                 act->pin -= GNTPIN_hstr_inc;
                 /* Done implicitly when page tables are destroyed. */
-                /* put_page(pfn_to_page(act->frame)); */
+                /* put_page(mfn_to_page(act->frame)); */
             }
         }
         else
@@ -969,7 +969,7 @@
             {
                 BUG_ON(!(act->pin & GNTPIN_devw_mask));
                 act->pin -= GNTPIN_devw_inc;
-                put_page_and_type(pfn_to_page(act->frame));
+                put_page_and_type(mfn_to_page(act->frame));
             }
 
             if ( map->ref_and_flags & GNTMAP_host_map )
@@ -977,7 +977,7 @@
                 BUG_ON(!(act->pin & GNTPIN_hstw_mask));
                 act->pin -= GNTPIN_hstw_inc;
                 /* Done implicitly when page tables are destroyed. */
-                /* put_page_and_type(pfn_to_page(act->frame)); */
+                /* put_page_and_type(mfn_to_page(act->frame)); */
             }
 
             if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
diff -r a12e08eb0209 -r 0c94043f5c5b xen/common/memory.c
--- a/xen/common/memory.c       Wed Feb  1 15:01:04 2006
+++ b/xen/common/memory.c       Wed Feb  1 15:28:50 2006
@@ -29,7 +29,7 @@
     unsigned int   flags,
     int           *preempted)
 {
-    struct pfn_info *page;
+    struct page_info *page;
     unsigned long    i;
 
     if ( (extent_list != NULL) &&
@@ -59,7 +59,7 @@
 
         /* Inform the domain of the new page's machine address. */ 
         if ( (extent_list != NULL) &&
-             (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
+             (__put_user(page_to_mfn(page), &extent_list[i]) != 0) )
             return i;
     }
 
@@ -75,7 +75,7 @@
     unsigned int   flags,
     int           *preempted)
 {
-    struct pfn_info *page;
+    struct page_info *page;
     unsigned long    i, j, pfn, mfn;
 
     if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
@@ -102,7 +102,7 @@
             goto out;
         }
 
-        mfn = page_to_pfn(page);
+        mfn = page_to_mfn(page);
 
         if ( unlikely(__get_user(pfn, &extent_list[i]) != 0) )
             goto out;
@@ -136,8 +136,8 @@
     unsigned int   flags,
     int           *preempted)
 {
-    struct pfn_info *page;
-    unsigned long    i, j, gpfn, mfn;
+    struct page_info *page;
+    unsigned long    i, j, gmfn, mfn;
 
     if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
         return 0;
@@ -150,12 +150,12 @@
             return i;
         }
 
-        if ( unlikely(__get_user(gpfn, &extent_list[i]) != 0) )
+        if ( unlikely(__get_user(gmfn, &extent_list[i]) != 0) )
             return i;
 
         for ( j = 0; j < (1 << extent_order); j++ )
         {
-            mfn = __gpfn_to_mfn(d, gpfn + j);
+            mfn = gmfn_to_mfn(d, gmfn + j);
             if ( unlikely(mfn >= max_page) )
             {
                 DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
@@ -163,7 +163,7 @@
                 return i;
             }
             
-            page = pfn_to_page(mfn);
+            page = mfn_to_page(mfn);
             if ( unlikely(!get_page(page, d)) )
             {
                 DPRINTK("Bad page free for domain %u\n", d->domain_id);
@@ -176,7 +176,7 @@
             if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
                 put_page(page);
 
-            guest_physmap_remove_page(d, gpfn + j, mfn);
+            guest_physmap_remove_page(d, gmfn + j, mfn);
 
             put_page(page);
         }
diff -r a12e08eb0209 -r 0c94043f5c5b xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Wed Feb  1 15:01:04 2006
+++ b/xen/common/page_alloc.c   Wed Feb  1 15:28:50 2006
@@ -132,7 +132,7 @@
  */
 
 /* Initialise allocator to handle up to @max_page pages. */
-physaddr_t init_boot_allocator(physaddr_t bitmap_start)
+paddr_t init_boot_allocator(paddr_t bitmap_start)
 {
     unsigned long bitmap_size;
 
@@ -145,7 +145,7 @@
     bitmap_size  = max_page / 8;
     bitmap_size += sizeof(unsigned long);
     bitmap_size  = round_pgup(bitmap_size);
-    alloc_bitmap = (unsigned long *)phys_to_virt(bitmap_start);
+    alloc_bitmap = (unsigned long *)maddr_to_virt(bitmap_start);
 
     /* All allocated by default. */
     memset(alloc_bitmap, ~0, bitmap_size);
@@ -153,7 +153,7 @@
     return bitmap_start + bitmap_size;
 }
 
-void init_boot_pages(physaddr_t ps, physaddr_t pe)
+void init_boot_pages(paddr_t ps, paddr_t pe)
 {
     unsigned long bad_pfn;
     char *p;
@@ -245,13 +245,13 @@
         if ( next_free )
             map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
         if ( curr_free )
-            free_heap_pages(pfn_dom_zone_type(i), pfn_to_page(i), 0);
+            free_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 0);
     }
 }
 
 /* Hand the specified arbitrary page range to the specified heap zone. */
 void init_heap_pages(
-    unsigned int zone, struct pfn_info *pg, unsigned long nr_pages)
+    unsigned int zone, struct page_info *pg, unsigned long nr_pages)
 {
     unsigned long i;
 
@@ -263,10 +263,10 @@
 
 
 /* Allocate 2^@order contiguous pages. */
-struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order)
+struct page_info *alloc_heap_pages(unsigned int zone, unsigned int order)
 {
     int i;
-    struct pfn_info *pg;
+    struct page_info *pg;
 
     ASSERT(zone < NR_ZONES);
 
@@ -285,7 +285,7 @@
     return NULL;
 
  found: 
-    pg = list_entry(heap[zone][i].next, struct pfn_info, list);
+    pg = list_entry(heap[zone][i].next, struct page_info, list);
     list_del(&pg->list);
 
     /* We may have to halve the chunk a number of times. */
@@ -296,7 +296,7 @@
         pg += 1 << i;
     }
     
-    map_alloc(page_to_pfn(pg), 1 << order);
+    map_alloc(page_to_mfn(pg), 1 << order);
     avail[zone] -= 1 << order;
 
     spin_unlock(&heap_lock);
@@ -307,7 +307,7 @@
 
 /* Free 2^@order set of pages. */
 void free_heap_pages(
-    unsigned int zone, struct pfn_info *pg, unsigned int order)
+    unsigned int zone, struct page_info *pg, unsigned int order)
 {
     unsigned long mask;
 
@@ -316,7 +316,7 @@
 
     spin_lock(&heap_lock);
 
-    map_free(page_to_pfn(pg), 1 << order);
+    map_free(page_to_mfn(pg), 1 << order);
     avail[zone] += 1 << order;
     
     /* Merge chunks as far as possible. */
@@ -324,10 +324,10 @@
     {
         mask = 1 << order;
 
-        if ( (page_to_pfn(pg) & mask) )
+        if ( (page_to_mfn(pg) & mask) )
         {
             /* Merge with predecessor block? */
-            if ( allocated_in_map(page_to_pfn(pg)-mask) ||
+            if ( allocated_in_map(page_to_mfn(pg)-mask) ||
                  (PFN_ORDER(pg-mask) != order) )
                 break;
             list_del(&(pg-mask)->list);
@@ -336,7 +336,7 @@
         else
         {
             /* Merge with successor block? */
-            if ( allocated_in_map(page_to_pfn(pg)+mask) ||
+            if ( allocated_in_map(page_to_mfn(pg)+mask) ||
                  (PFN_ORDER(pg+mask) != order) )
                 break;
             list_del(&(pg+mask)->list);
@@ -383,9 +383,9 @@
         /* Re-check page status with lock held. */
         if ( !allocated_in_map(pfn) )
         {
-            if ( IS_XEN_HEAP_FRAME(pfn_to_page(pfn)) )
+            if ( IS_XEN_HEAP_FRAME(mfn_to_page(pfn)) )
             {
-                p = page_to_virt(pfn_to_page(pfn));
+                p = page_to_virt(mfn_to_page(pfn));
                 memguard_unguard_range(p, PAGE_SIZE);
                 clear_page(p);
                 memguard_guard_range(p, PAGE_SIZE);
@@ -410,7 +410,7 @@
  * XEN-HEAP SUB-ALLOCATOR
  */
 
-void init_xenheap_pages(physaddr_t ps, physaddr_t pe)
+void init_xenheap_pages(paddr_t ps, paddr_t pe)
 {
     unsigned long flags;
 
@@ -419,17 +419,17 @@
     if ( pe <= ps )
         return;
 
-    memguard_guard_range(phys_to_virt(ps), pe - ps);
+    memguard_guard_range(maddr_to_virt(ps), pe - ps);
 
     /*
      * Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
      * prevent merging of power-of-two blocks across the zone boundary.
      */
-    if ( !IS_XEN_HEAP_FRAME(phys_to_page(pe)) )
+    if ( !IS_XEN_HEAP_FRAME(maddr_to_page(pe)) )
         pe -= PAGE_SIZE;
 
     local_irq_save(flags);
-    init_heap_pages(MEMZONE_XEN, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
+    init_heap_pages(MEMZONE_XEN, maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
     local_irq_restore(flags);
 }
 
@@ -437,7 +437,7 @@
 void *alloc_xenheap_pages(unsigned int order)
 {
     unsigned long flags;
-    struct pfn_info *pg;
+    struct page_info *pg;
     int i;
 
     local_irq_save(flags);
@@ -484,7 +484,7 @@
  * DOMAIN-HEAP SUB-ALLOCATOR
  */
 
-void init_domheap_pages(physaddr_t ps, physaddr_t pe)
+void init_domheap_pages(paddr_t ps, paddr_t pe)
 {
     unsigned long s_tot, e_tot, s_dma, e_dma, s_nrm, e_nrm;
 
@@ -496,19 +496,19 @@
     s_dma = min(s_tot, MAX_DMADOM_PFN + 1);
     e_dma = min(e_tot, MAX_DMADOM_PFN + 1);
     if ( s_dma < e_dma )
-        init_heap_pages(MEMZONE_DMADOM, pfn_to_page(s_dma), e_dma - s_dma);
+        init_heap_pages(MEMZONE_DMADOM, mfn_to_page(s_dma), e_dma - s_dma);
 
     s_nrm = max(s_tot, MAX_DMADOM_PFN + 1);
     e_nrm = max(e_tot, MAX_DMADOM_PFN + 1);
     if ( s_nrm < e_nrm )
-        init_heap_pages(MEMZONE_DOM, pfn_to_page(s_nrm), e_nrm - s_nrm);
-}
-
-
-struct pfn_info *alloc_domheap_pages(
+        init_heap_pages(MEMZONE_DOM, mfn_to_page(s_nrm), e_nrm - s_nrm);
+}
+
+
+struct page_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int flags)
 {
-    struct pfn_info *pg = NULL;
+    struct page_info *pg = NULL;
     cpumask_t mask;
     int i;
 
@@ -560,7 +560,7 @@
         DPRINTK("...or the domain is dying (%d)\n", 
                 !!test_bit(_DOMF_dying, &d->domain_flags));
         spin_unlock(&d->page_alloc_lock);
-        free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
+        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
         return NULL;
     }
 
@@ -583,7 +583,7 @@
 }
 
 
-void free_domheap_pages(struct pfn_info *pg, unsigned int order)
+void free_domheap_pages(struct page_info *pg, unsigned int order)
 {
     int            i, drop_dom_ref;
     struct domain *d = page_get_owner(pg);
@@ -624,7 +624,7 @@
 
         if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) )
         {
-            free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
+            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
         }
         else
         {
@@ -646,7 +646,7 @@
         /* Freeing anonymous domain-heap pages. */
         for ( i = 0; i < (1 << order); i++ )
             pg[i].u.free.cpumask = CPU_MASK_NONE;
-        free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
+        free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
         drop_dom_ref = 0;
     }
 
@@ -669,7 +669,7 @@
 static void page_scrub_softirq(void)
 {
     struct list_head *ent;
-    struct pfn_info  *pg;
+    struct page_info  *pg;
     void             *p;
     int               i;
     s_time_t          start = NOW();
@@ -701,12 +701,12 @@
         /* Working backwards, scrub each page in turn. */
         while ( ent != &page_scrub_list )
         {
-            pg = list_entry(ent, struct pfn_info, list);
+            pg = list_entry(ent, struct page_info, list);
             ent = ent->prev;
-            p = map_domain_page(page_to_pfn(pg));
+            p = map_domain_page(page_to_mfn(pg));
             clear_page(p);
             unmap_domain_page(p);
-            free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, 0);
+            free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, 0);
         }
     } while ( (NOW() - start) < MILLISECS(1) );
 }
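
A quick note on the merge loop in free_heap_pages() above: a free block of 2^order pages has its buddy at the mfn with bit 'order' flipped, which is exactly what the page_to_mfn(pg) & mask test distinguishes. A minimal sketch:

    /* Sketch: the buddy of a 2^order block starts at mfn ^ (1 << order). */
    static unsigned long buddy_mfn(unsigned long mfn, unsigned int order)
    {
        return mfn ^ (1UL << order);
    }
    /* e.g. an order-2 block at mfn 0x104 merges downwards with 0x100,
     * while an order-2 block at mfn 0x100 merges upwards with 0x104. */
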
diff -r a12e08eb0209 -r 0c94043f5c5b xen/common/xmalloc.c
--- a/xen/common/xmalloc.c      Wed Feb  1 15:01:04 2006
+++ b/xen/common/xmalloc.c      Wed Feb  1 15:28:50 2006
@@ -21,8 +21,8 @@
 
 /*
  * TODO (Keir, 17/2/05):
- *  1. Use space in pfn_info to avoid xmalloc_hdr in allocated blocks.
- *  2. pfn_info points into free list to make xfree() O(1) complexity.
+ *  1. Use space in page_info to avoid xmalloc_hdr in allocated blocks.
+ *  2. page_info points into free list to make xfree() O(1) complexity.
  *  3. Perhaps make this a sub-page buddy allocator? xmalloc() == O(1).
  *     (Disadvantage is potentially greater internal fragmentation).
  */
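
To make that TODO concrete: item 2 asks for a back-pointer from the page to its free-list node so xfree() can avoid walking the list. A loose sketch (the free_hdr field and xfreelist name are hypothetical):

    /* Hypothetical O(1) xfree: page_info carries a pointer to the chunk's
     * free-list header instead of xfree() searching for it. */
    void xfree_o1(void *p)
    {
        struct page_info *pg = virt_to_page(p);
        list_add(&pg->free_hdr->freelist, &xfreelist);  /* no list walk */
    }
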
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h     Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-ia64/config.h     Wed Feb  1 15:28:50 2006
@@ -49,7 +49,7 @@
 typedef int pid_t;
 
 // now needed for xen/include/mm.h
-typedef unsigned long physaddr_t;
+typedef unsigned long paddr_t;
 // from include/linux/kernel.h
 #define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
 
@@ -212,8 +212,8 @@
 #define _atomic_read(v) ((v).counter)
 #define atomic_compareandswap(old, new, v) ((atomic_t){ cmpxchg(v, _atomic_read(old), _atomic_read(new)) })
 
-// see include/asm-ia64/mm.h, handle remaining pfn_info uses until gone
-#define pfn_info page
+// see include/asm-ia64/mm.h, handle remaining page_info uses until gone
+#define page_info page
 
 // see common/memory.c
 #define set_pfn_from_mfn(x,y)  do { } while (0)
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-ia64/grant_table.h
--- a/xen/include/asm-ia64/grant_table.h        Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-ia64/grant_table.h        Wed Feb  1 15:28:50 2006
@@ -12,12 +12,12 @@
 
 #define steal_page_for_grant_transfer(d, p)  0
 
-#define gnttab_create_shared_mfn(d, t, i) ((void)0)
+#define gnttab_create_shared_page(d, t, i) ((void)0)
 
-#define gnttab_shared_gpfn(d, t, i)                                     \
+#define gnttab_shared_gmfn(d, t, i)                                     \
     ( ((d) == dom0) ?                                                   \
-      ((virt_to_phys((t)->shared) >> PAGE_SHIFT) + (i)) :               \
-      (map_domain_page((d), 1UL<<40, virt_to_phys((t)->shared)),        \
+      ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i)) :              \
+      (map_domain_page((d), 1UL<<40, virt_to_maddr((t)->shared)),       \
        1UL << (40 - PAGE_SHIFT))                                        \
     )
 
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-ia64/linux-xen/asm/io.h
--- a/xen/include/asm-ia64/linux-xen/asm/io.h   Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/io.h   Wed Feb  1 15:28:50 2006
@@ -80,13 +80,13 @@
  * Change virtual addresses to physical addresses and vv.
  */
 static inline unsigned long
-virt_to_phys (volatile void *address)
+virt_to_maddr (volatile void *address)
 {
        return (unsigned long) address - PAGE_OFFSET;
 }
 
 static inline void*
-phys_to_virt (unsigned long address)
+maddr_to_virt (unsigned long address)
 {
        return (void *) (address + PAGE_OFFSET);
 }
@@ -98,9 +98,9 @@
  * The following two macros are deprecated and scheduled for removal.
  * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
  */
-#define bus_to_virt    phys_to_virt
-#define virt_to_bus    virt_to_phys
-#define page_to_bus    page_to_phys
+#define bus_to_virt    maddr_to_virt
+#define virt_to_bus    virt_to_maddr
+#define page_to_bus    page_to_maddr
 
 # endif /* KERNEL */
 
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-ia64/linux-xen/asm/page.h
--- a/xen/include/asm-ia64/linux-xen/asm/page.h Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/page.h Wed Feb  1 15:28:50 2006
@@ -86,28 +86,28 @@
 
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) mfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-extern int ia64_pfn_valid (unsigned long pfn);
+extern int ia64_mfn_valid (unsigned long pfn);
 #else
-# define ia64_pfn_valid(pfn) 1
+# define ia64_mfn_valid(pfn) 1
 #endif
 
 #ifndef CONFIG_DISCONTIGMEM
-# define pfn_valid(pfn)                (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
-# define page_to_pfn(page)     ((unsigned long) (page - mem_map))
-# define pfn_to_page(pfn)      (mem_map + (pfn))
+# define mfn_valid(pfn)                (((pfn) < max_mapnr) && ia64_mfn_valid(pfn))
+# define page_to_mfn(page)     ((unsigned long) (page - mem_map))
+# define mfn_to_page(pfn)      (mem_map + (pfn))
 #else
 extern struct page *vmem_map;
 extern unsigned long max_low_pfn;
-# define pfn_valid(pfn)                (((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
-# define page_to_pfn(page)     ((unsigned long) (page - vmem_map))
-# define pfn_to_page(pfn)      (vmem_map + (pfn))
-#endif
-
-#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
-#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+# define mfn_valid(pfn)                (((pfn) < max_low_pfn) && ia64_mfn_valid(pfn))
+# define page_to_mfn(page)     ((unsigned long) (page - vmem_map))
+# define mfn_to_page(pfn)      (vmem_map + (pfn))
+#endif
+
+#define page_to_maddr(page)    (page_to_mfn(page) << PAGE_SHIFT)
+#define virt_to_page(kaddr)    mfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 
 typedef union ia64_va {
        struct {
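
The conversions defined above compose into simple round trips; a sketch (struct page here, since this is the Linux-derived ia64 header, and page_info aliases it per asm-ia64/config.h):

    unsigned long mfn = 0x1234;           /* arbitrary example frame     */
    struct page *pg = mfn_to_page(mfn);   /* mem_map (or vmem_map) + mfn */
    ASSERT(page_to_mfn(pg) == mfn);       /* (pg - mem_map) == mfn       */
    ASSERT(page_to_maddr(pg) == (mfn << PAGE_SHIFT));
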
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-ia64/linux-xen/asm/pgalloc.h
--- a/xen/include/asm-ia64/linux-xen/asm/pgalloc.h      Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/pgalloc.h      Wed Feb  1 15:28:50 2006
@@ -109,7 +109,7 @@
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
 {
-       pmd_val(*pmd_entry) = page_to_phys(pte);
+       pmd_val(*pmd_entry) = page_to_maddr(pte);
 }
 
 static inline void
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-ia64/linux-xen/asm/pgtable.h
--- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h      Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/pgtable.h      Wed Feb  1 15:28:50 2006
@@ -235,7 +235,7 @@
 /* Extract pfn from pte.  */
 #define pte_pfn(_pte)          ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)
 
-#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte(page, pgprot)   pfn_pte(page_to_mfn(page), (pgprot))
 
 /* This takes a physical page address that is used by the remapping functions */
 #define mk_pte_phys(physpage, pgprot) \
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-ia64/linux-xen/asm/uaccess.h
--- a/xen/include/asm-ia64/linux-xen/asm/uaccess.h      Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-ia64/linux-xen/asm/uaccess.h      Wed Feb  1 15:28:50 2006
@@ -384,7 +384,7 @@
        struct page *page;
        char * ptr;
 
-       page = pfn_to_page(p >> PAGE_SHIFT);
+       page = mfn_to_page(p >> PAGE_SHIFT);
        if (PageUncached(page))
                ptr = (char *)p + __IA64_UNCACHED_OFFSET;
        else
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-ia64/linux/mmzone.h
--- a/xen/include/asm-ia64/linux/mmzone.h       Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-ia64/linux/mmzone.h       Wed Feb  1 15:28:50 2006
@@ -289,7 +289,7 @@
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
 #define pgdat_page_nr(pgdat, pagenr)   ((pgdat)->node_mem_map + (pagenr))
 #else
-#define pgdat_page_nr(pgdat, pagenr)   pfn_to_page((pgdat)->node_start_pfn + (pagenr))
+#define pgdat_page_nr(pgdat, pagenr)   mfn_to_page((pgdat)->node_start_pfn + (pagenr))
 #endif
 #define nid_page_nr(nid, pagenr)       pgdat_page_nr(NODE_DATA(nid),(pagenr))
 
@@ -536,18 +536,18 @@
        return __nr_to_section(pfn_to_section_nr(pfn));
 }
 
-#define pfn_to_page(pfn)                                               \
+#define mfn_to_page(pfn)                                               \
 ({                                                                     \
        unsigned long __pfn = (pfn);                                    \
        __section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn;        \
 })
-#define page_to_pfn(page)                                              \
+#define page_to_mfn(page)                                              \
 ({                                                                     \
        page - __section_mem_map_addr(__nr_to_section(                  \
                page_to_section(page)));                                \
 })
 
-static inline int pfn_valid(unsigned long pfn)
+static inline int mfn_valid(unsigned long pfn)
 {
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
@@ -568,7 +568,7 @@
        NODE_DATA(pfn_to_nid(pfn));                                     \
 })
 
-#define early_pfn_valid(pfn)   pfn_valid(pfn)
+#define early_mfn_valid(pfn)   mfn_valid(pfn)
 void sparse_init(void);
 #else
 #define sparse_init()  do {} while (0)
@@ -580,8 +580,8 @@
 #define early_pfn_in_nid(pfn, nid)     (1)
 #endif
 
-#ifndef early_pfn_valid
-#define early_pfn_valid(pfn)   (1)
+#ifndef early_mfn_valid
+#define early_mfn_valid(pfn)   (1)
 #endif
 
 void memory_present(int nid, unsigned long start, unsigned long end);
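
Under the sparse-memory configuration above, mfn_to_page() resolves through a per-section mem_map instead of one flat array. The macro's data flow, restated as a function purely for readability:

    static inline struct page *sparse_mfn_to_page(unsigned long mfn)
    {
        struct mem_section *sec = __pfn_to_section(mfn);  /* mfn -> section    */
        return __section_mem_map_addr(sec) + mfn;         /* its map, plus mfn */
    }
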
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-ia64/mm.h Wed Feb  1 15:28:50 2006
@@ -29,7 +29,7 @@
  * Per-page-frame information.
  * 
  * Every architecture must ensure the following:
- *  1. 'struct pfn_info' contains a 'struct list_head list'.
+ *  1. 'struct page_info' contains a 'struct list_head list'.
  *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
  */
 #define PFN_ORDER(_pfn)        ((_pfn)->u.free.order)
@@ -106,8 +106,8 @@
 /* 30-bit count of references to this frame. */
 #define PGC_count_mask      ((1U<<30)-1)
 
-#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_phys(_pfn) < xenheap_phys_end) \
-                                && (page_to_phys(_pfn) >= xen_pstart))
+#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_maddr(_pfn) < xenheap_phys_end) \
+                                && (page_to_maddr(_pfn) >= xen_pstart))
 
 static inline struct domain *unpickle_domptr(u32 _d)
 { return (_d == 0) ? NULL : __va(_d); }
@@ -120,7 +120,7 @@
 /* Dummy now */
 #define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) do { } while (0)
 
-extern struct pfn_info *frame_table;
+extern struct page_info *frame_table;
 extern unsigned long frame_table_size;
 extern struct list_head free_list;
 extern spinlock_t free_list_lock;
@@ -134,7 +134,7 @@
 #endif
 void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
 
-static inline void put_page(struct pfn_info *page)
+static inline void put_page(struct page_info *page)
 {
 #ifdef VALIDATE_VT     // doesn't work with non-VTI in grant tables yet
     u32 nx, x, y = page->count_info;
@@ -151,7 +151,7 @@
 }
 
 /* count_info and ownership are checked atomically. */
-static inline int get_page(struct pfn_info *page,
+static inline int get_page(struct page_info *page,
                            struct domain *domain)
 {
 #ifdef VALIDATE_VT
@@ -165,7 +165,7 @@
            unlikely((nx & PGC_count_mask) == 0) ||     /* Count overflow? */
            unlikely((x >> 32) != _domain)) {           /* Wrong owner? */
            DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%08x\n",
-               page_to_pfn(page), domain, unpickle_domptr(domain),
+               page_to_mfn(page), domain, unpickle_domptr(domain),
                x, page->u.inuse.type_info);
            return 0;
        }
@@ -178,14 +178,14 @@
 /* No type info now */
 #define put_page_type(page)
 #define get_page_type(page, type) 1
-static inline void put_page_and_type(struct pfn_info *page)
+static inline void put_page_and_type(struct page_info *page)
 {
     put_page_type(page);
     put_page(page);
 }
 
 
-static inline int get_page_and_type(struct pfn_info *page,
+static inline int get_page_and_type(struct page_info *page,
                                     struct domain *domain,
                                     u32 type)
 {
@@ -366,7 +366,7 @@
 
 static inline void *lowmem_page_address(struct page *page)
 {
-       return __va(page_to_pfn(page) << PAGE_SHIFT);
+       return __va(page_to_mfn(page) << PAGE_SHIFT);
 }
 
 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
@@ -422,29 +422,29 @@
 * here. However if it's allocated by HV, we should access it directly
 */
 
-#define __mfn_to_gpfn(_d, mfn)                 \
+#define mfn_to_gmfn(_d, mfn)                   \
     machine_to_phys_mapping[(mfn)]
 
-#define __gpfn_to_mfn(_d, gpfn)                        \
-    __gpfn_to_mfn_foreign((_d), (gpfn))
+#define gmfn_to_mfn(_d, gpfn)                  \
+    gmfn_to_mfn_foreign((_d), (gpfn))
 
 #define __gpfn_invalid(_d, gpfn)                       \
        (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_INV_MASK)
 
-#define __gpfn_valid(_d, gpfn) !__gpfn_invalid(_d, gpfn)
+#define __gmfn_valid(_d, gpfn) !__gpfn_invalid(_d, gpfn)
 
 /* Return I/O type if true */
 #define __gpfn_is_io(_d, gpfn)                         \
-       (__gpfn_valid(_d, gpfn) ?                       \
+       (__gmfn_valid(_d, gpfn) ?                       \
        (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) : 0)
 
 #define __gpfn_is_mem(_d, gpfn)                                \
-       (__gpfn_valid(_d, gpfn) ?                       \
+       (__gmfn_valid(_d, gpfn) ?                       \
        ((lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) == GPFN_MEM) : 0)
 
 
 #define __gpa_to_mpa(_d, gpa)   \
-    ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
+    ((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
 
 /* Arch-specific portion of memory_op hypercall. */
 #define arch_memory_op(op, arg) (-ENOSYS)
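
The two translation macros above are inverses for an ordinary RAM frame owned by the domain (not for I/O or invalid frames, per the __gpfn_is_io()/GPFN_INV_MASK checks above). A sketch of the round trip:

    unsigned long gpfn = mfn_to_gmfn(d, mfn);  /* machine_to_phys_mapping[mfn] */
    ASSERT(gmfn_to_mfn(d, gpfn) == mfn);       /* via gmfn_to_mfn_foreign()    */
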
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-ia64/xenpage.h
--- a/xen/include/asm-ia64/xenpage.h    Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-ia64/xenpage.h    Wed Feb  1 15:28:50 2006
@@ -5,20 +5,20 @@
 #error "xenpage.h: page macros need to be defined for CONFIG_DISCONTIGMEM"
 #endif
 
-#undef pfn_valid
-#undef page_to_pfn
-#undef pfn_to_page
-# define pfn_valid(_pfn)       ((_pfn) < max_page)
-# define page_to_pfn(_page)    ((unsigned long) ((_page) - frame_table))
-# define pfn_to_page(_pfn)     (frame_table + (_pfn))
+#undef mfn_valid
+#undef page_to_mfn
+#undef mfn_to_page
+# define mfn_valid(_pfn)       ((_pfn) < max_page)
+# define page_to_mfn(_page)    ((unsigned long) ((_page) - frame_table))
+# define mfn_to_page(_pfn)     (frame_table + (_pfn))
 
-#undef page_to_phys
+#undef page_to_maddr
 #undef virt_to_page
-#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
-#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_maddr(page)    (page_to_mfn(page) << PAGE_SHIFT)
+#define virt_to_page(kaddr)    mfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 
-#define page_to_virt(_page)    phys_to_virt(page_to_phys(_page))
-#define phys_to_page(kaddr)    pfn_to_page(((kaddr) >> PAGE_SHIFT))
+#define page_to_virt(_page)    maddr_to_virt(page_to_maddr(_page))
+#define maddr_to_page(kaddr)   mfn_to_page(((kaddr) >> PAGE_SHIFT))
 
 #ifndef __ASSEMBLY__
 typedef union xen_va {
@@ -30,7 +30,7 @@
        void *p;
 } xen_va;
 
-static inline int get_order_from_bytes(physaddr_t size)
+static inline int get_order_from_bytes(paddr_t size)
 {
     int order;
     size = (size-1) >> PAGE_SHIFT;
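
get_order_from_bytes() is cut off by the hunk boundary right after the shift; assuming the usual completion (a count-the-bits loop), its behaviour is:

    /* Assumed completion of the function above, plus a worked example. */
    for ( order = 0; size; order++ )
        size >>= 1;
    return order;
    /* With 4 KB pages: size = 12 KB -> (12K-1) >> 12 = 2 -> order 2,
     * i.e. four pages, the smallest power-of-two block covering 12 KB. */
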
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-x86/grant_table.h
--- a/xen/include/asm-x86/grant_table.h Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-x86/grant_table.h Wed Feb  1 15:28:50 2006
@@ -19,22 +19,22 @@
     unsigned long addr, unsigned long frame, unsigned int flags);
 
 int steal_page_for_grant_transfer(
-    struct domain *d, struct pfn_info *page);
+    struct domain *d, struct page_info *page);
 
-#define gnttab_create_shared_mfn(d, t, i)                                \
+#define gnttab_create_shared_page(d, t, i)                               \
     do {                                                                 \
         SHARE_PFN_WITH_DOMAIN(                                           \
             virt_to_page((char *)(t)->shared + ((i) * PAGE_SIZE)), (d)); \
         set_pfn_from_mfn(                                                \
-            (virt_to_phys((t)->shared) >> PAGE_SHIFT) + (i),             \
+            (virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i),            \
             INVALID_M2P_ENTRY);                                          \
     } while ( 0 )
 
 #define gnttab_shared_mfn(d, t, i)                      \
-    ((virt_to_phys((t)->shared) >> PAGE_SHIFT) + (i))
+    ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i))
 
-#define gnttab_shared_gpfn(d, t, i)                     \
-    (__mfn_to_gpfn(d, gnttab_shared_mfn(d, t, i)))
+#define gnttab_shared_gmfn(d, t, i)                     \
+    (mfn_to_gmfn(d, gnttab_shared_mfn(d, t, i)))
 
 #define gnttab_log_dirty(d, f) mark_dirty((d), (f))
 
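
gnttab_shared_mfn() relies on the grant-table shared area being contiguous in
the direct-mapped Xen heap, so frame i is just the base frame plus i. A toy
sketch of that arithmetic (PAGE_OFFSET and the addresses are made up):

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_OFFSET 0xFF000000UL  /* toy direct-map base */
    #define virt_to_maddr(va) ((unsigned long)(va) - PAGE_OFFSET)

    struct grant_table { void *shared; };

    #define gnttab_shared_mfn(d, t, i) \
        ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i))

    int main(void)
    {
        struct grant_table gt = { .shared = (void *)(PAGE_OFFSET + 0x5000) };
        printf("page 0 -> mfn %lu\n", gnttab_shared_mfn(NULL, &gt, 0)); /* 5 */
        printf("page 2 -> mfn %lu\n", gnttab_shared_mfn(NULL, &gt, 2)); /* 7 */
        return 0;
    }
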
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-x86/io.h
--- a/xen/include/asm-x86/io.h  Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-x86/io.h  Wed Feb  1 15:28:50 2006
@@ -4,52 +4,6 @@
 #include <xen/config.h>
 #include <xen/types.h>
 #include <asm/page.h>
-
-#define IO_SPACE_LIMIT 0xffff
-
-/**
- *  virt_to_phys    -   map virtual addresses to physical
- *  @address: address to remap
- *
- *  The returned physical address is the physical (CPU) mapping for
- *  the memory address given. It is only valid to use this function on
- *  addresses directly mapped or allocated via xmalloc.
- *
- *  This function does not give bus mappings for DMA transfers. In
- *  almost all conceivable cases a device driver should not be using
- *  this function
- */
-
-static inline unsigned long virt_to_phys(volatile void * address)
-{
-    return __pa(address);
-}
-
-/**
- *  phys_to_virt    -   map physical address to virtual
- *  @address: address to remap
- *
- *  The returned virtual address is a current CPU mapping for
- *  the memory address given. It is only valid to use this function on
- *  addresses that have a kernel mapping
- *
- *  This function does not handle bus mappings for DMA transfers. In
- *  almost all conceivable cases a device driver should not be using
- *  this function
- */
-
-static inline void * phys_to_virt(unsigned long address)
-{
-    return __va(address);
-}
-
-/*
- * Change "struct pfn_info" to physical address.
- */
-#define page_to_phys(page)  ((physaddr_t)(page - frame_table) << PAGE_SHIFT)
-
-#define page_to_pfn(_page)  ((unsigned long)((_page) - frame_table))
-#define page_to_virt(_page) phys_to_virt(page_to_phys(_page))
 
 /* We don't need real ioremap() on Xen/x86. */
 #define ioremap(x,l) (__va(x))
@@ -60,13 +14,6 @@
 #define writeb(d,x) (*(volatile char *)(x) = (d))
 #define writew(d,x) (*(volatile short *)(x) = (d))
 #define writel(d,x) (*(volatile int *)(x) = (d))
-
-/*
- * IO bus memory addresses are also 1:1 with the physical address
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-#define page_to_bus page_to_phys
 
 #define __OUT1(s,x) \
 static inline void out##s(unsigned x value, unsigned short port) {
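
virt_to_phys()/phys_to_virt() do not disappear; they reappear in
asm-x86/page.h below as virt_to_maddr()/maddr_to_virt(). The translation
itself is just the direct-map offset, as this toy shows (PAGE_OFFSET value
made up):

    #include <stdio.h>

    #define PAGE_OFFSET 0xFF000000UL  /* toy value; the real one is __PAGE_OFFSET */
    #define virt_to_maddr(va) ((unsigned long)(va) - PAGE_OFFSET)
    #define maddr_to_virt(ma) ((void *)((unsigned long)(ma) + PAGE_OFFSET))

    int main(void)
    {
        unsigned long ma = 0x40E;  /* e.g. the BIOS EBDA pointer, used below */
        void *va = maddr_to_virt(ma);
        printf("ma=%#lx va=%p round-trip=%#lx\n", ma, va, virt_to_maddr(va));
        return 0;
    }
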
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-x86/mach-default/bios_ebda.h
--- a/xen/include/asm-x86/mach-default/bios_ebda.h      Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-x86/mach-default/bios_ebda.h      Wed Feb  1 15:28:50 2006
@@ -7,7 +7,7 @@
  */
 static inline unsigned int get_bios_ebda(void)
 {
-       unsigned int address = *(unsigned short *)phys_to_virt(0x40E);
+       unsigned int address = *(unsigned short *)maddr_to_virt(0x40E);
        address <<= 4;
        return address; /* 0 means none */
 }
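
The word at machine address 0x40E holds the EBDA's real-mode segment; shifting
it left four bits yields the physical address, e.g.:

    #include <stdio.h>

    int main(void)
    {
        unsigned int segment = 0x9FC0;     /* a typical EBDA segment value */
        unsigned int address = segment << 4;
        printf("EBDA at %#x\n", address);  /* 0x9fc00, just under 640K */
        return 0;
    }
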
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-x86/mach-default/mach_wakecpu.h
--- a/xen/include/asm-x86/mach-default/mach_wakecpu.h   Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-x86/mach-default/mach_wakecpu.h   Wed Feb  1 15:28:50 2006
@@ -8,8 +8,8 @@
 
 #define WAKE_SECONDARY_VIA_INIT
 
-#define TRAMPOLINE_LOW phys_to_virt(0x467)
-#define TRAMPOLINE_HIGH phys_to_virt(0x469)
+#define TRAMPOLINE_LOW maddr_to_virt(0x467)
+#define TRAMPOLINE_HIGH maddr_to_virt(0x469)
 
 #define boot_cpu_apicid boot_cpu_physical_apicid
 
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-x86/mach-es7000/mach_wakecpu.h
--- a/xen/include/asm-x86/mach-es7000/mach_wakecpu.h    Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-x86/mach-es7000/mach_wakecpu.h    Wed Feb  1 15:28:50 2006
@@ -23,8 +23,8 @@
 }
 #endif
 
-#define TRAMPOLINE_LOW phys_to_virt(0x467)
-#define TRAMPOLINE_HIGH phys_to_virt(0x469)
+#define TRAMPOLINE_LOW maddr_to_virt(0x467)
+#define TRAMPOLINE_HIGH maddr_to_virt(0x469)
 
 #define boot_cpu_apicid boot_cpu_physical_apicid
 
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-x86/mm.h  Wed Feb  1 15:28:50 2006
@@ -12,12 +12,12 @@
  * Per-page-frame information.
  * 
  * Every architecture must ensure the following:
- *  1. 'struct pfn_info' contains a 'struct list_head list'.
+ *  1. 'struct page_info' contains a 'struct list_head list'.
  *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
  */
 #define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
 
-struct pfn_info
+struct page_info
 {
     /* Each frame can be threaded onto a doubly-linked list. */
     struct list_head list;
@@ -121,7 +121,7 @@
 #define PageSetSlab(page)   ((void)0)
 #define PageClearSlab(page) ((void)0)
 
-#define IS_XEN_HEAP_FRAME(_pfn) (page_to_phys(_pfn) < xenheap_phys_end)
+#define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)
 
 #if defined(__i386__)
 #define pickle_domptr(_d)   ((u32)(unsigned long)(_d))
@@ -154,20 +154,20 @@
         spin_unlock(&(_dom)->page_alloc_lock);                              \
     } while ( 0 )
 
-extern struct pfn_info *frame_table;
+extern struct page_info *frame_table;
 extern unsigned long max_page;
 extern unsigned long total_pages;
 void init_frametable(void);
 
-int alloc_page_type(struct pfn_info *page, unsigned long type);
-void free_page_type(struct pfn_info *page, unsigned long type);
+int alloc_page_type(struct page_info *page, unsigned long type);
+void free_page_type(struct page_info *page, unsigned long type);
 extern void invalidate_shadow_ldt(struct vcpu *d);
 extern int shadow_remove_all_write_access(
-    struct domain *d, unsigned long gpfn, unsigned long gmfn);
+    struct domain *d, unsigned long gmfn, unsigned long mfn);
 extern u32 shadow_remove_all_access( struct domain *d, unsigned long gmfn);
 extern int _shadow_mode_refcounts(struct domain *d);
 
-static inline void put_page(struct pfn_info *page)
+static inline void put_page(struct page_info *page)
 {
     u32 nx, x, y = page->count_info;
 
@@ -182,7 +182,7 @@
 }
 
 
-static inline int get_page(struct pfn_info *page,
+static inline int get_page(struct page_info *page,
                            struct domain *domain)
 {
     u32 x, nx, y = page->count_info;
@@ -199,7 +199,7 @@
         {
             if ( !_shadow_mode_refcounts(domain) )
                 DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%" 
PRtype_info "\n",
-                        page_to_pfn(page), domain, unpickle_domptr(d),
+                        page_to_mfn(page), domain, unpickle_domptr(d),
                         x, page->u.inuse.type_info);
             return 0;
         }
@@ -214,19 +214,19 @@
     return 1;
 }
 
-void put_page_type(struct pfn_info *page);
-int  get_page_type(struct pfn_info *page, unsigned long type);
+void put_page_type(struct page_info *page);
+int  get_page_type(struct page_info *page, unsigned long type);
 int  get_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
 void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
 
-static inline void put_page_and_type(struct pfn_info *page)
+static inline void put_page_and_type(struct page_info *page)
 {
     put_page_type(page);
     put_page(page);
 }
 
 
-static inline int get_page_and_type(struct pfn_info *page,
+static inline int get_page_and_type(struct page_info *page,
                                     struct domain *domain,
                                     unsigned long type)
 {
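
get_page() bumps count_info and (re)checks the pickled owner in a single
compare-and-swap, retrying if the page changed underneath it. A compressed
user-space sketch of that retry pattern, with a bare atomic counter standing
in for the (count, owner) word the real code validates:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int count_info;

    static int toy_get_page(void)
    {
        unsigned int x = atomic_load(&count_info), nx;
        do {
            nx = x + 1;
            if ( nx == 0 )            /* count would wrap: refuse the reference */
                return 0;
        } while ( !atomic_compare_exchange_weak(&count_info, &x, nx) );
        return 1;
    }

    int main(void)
    {
        printf("got=%d count=%u\n", toy_get_page(), atomic_load(&count_info));
        return 0;
    }
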
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-x86/page-guest32.h
--- a/xen/include/asm-x86/page-guest32.h        Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-x86/page-guest32.h        Wed Feb  1 15:28:50 2006
@@ -34,9 +34,9 @@
 #define l2e_get_flags_32(x)           (get_pte_flags_32((x).l2))
 
 #define l1e_get_paddr_32(x)           \
-    ((physaddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
+    ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
 #define l2e_get_paddr_32(x)           \
-    ((physaddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
+    ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
 
 /* Construct an empty pte. */
 #define l1e_empty_32()                ((l1_pgentry_32_t) { 0 })
@@ -50,12 +50,12 @@
 
 /* Construct a pte from a physical address and access flags. */
 #ifndef __ASSEMBLY__
-static inline l1_pgentry_32_t l1e_from_paddr_32(physaddr_t pa, unsigned int flags)
+static inline l1_pgentry_32_t l1e_from_paddr_32(paddr_t pa, unsigned int flags)
 {
     ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
     return (l1_pgentry_32_t) { pa | put_pte_flags_32(flags) };
 }
-static inline l2_pgentry_32_t l2e_from_paddr_32(physaddr_t pa, unsigned int flags)
+static inline l2_pgentry_32_t l2e_from_paddr_32(paddr_t pa, unsigned int flags)
 {
     ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
     return (l2_pgentry_32_t) { pa | put_pte_flags_32(flags) };
@@ -64,8 +64,8 @@
 
 
 /* Construct a pte from a page pointer and access flags. */
-#define l1e_from_page_32(page, flags) (l1e_from_pfn_32(page_to_pfn(page),(flags)))
-#define l2e_from_page_32(page, flags) (l2e_from_pfn_32(page_to_pfn(page),(flags)))
+#define l1e_from_page_32(page, flags) (l1e_from_pfn_32(page_to_mfn(page),(flags)))
+#define l2e_from_page_32(page, flags) (l2e_from_pfn_32(page_to_mfn(page),(flags)))
 
 /* Add extra flags to an existing pte. */
 #define l1e_add_flags_32(x, flags)    ((x).l1 |= put_pte_flags_32(flags))
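
A pte here is just the page-aligned physical address OR'd with its flag bits,
which l1e_from_paddr_32() asserts and packs. A toy version, with
put_pte_flags_32() reduced to the identity for brevity:

    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))
    #define PADDR_MASK 0xFFFFFFFFUL

    typedef struct { uint32_t l1; } l1_pgentry_32_t;

    static inline l1_pgentry_32_t l1e_from_paddr_32(uint32_t pa, unsigned int flags)
    {
        assert((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);  /* pa must be page-aligned */
        return (l1_pgentry_32_t){ pa | flags };
    }

    int main(void)
    {
        l1_pgentry_32_t e = l1e_from_paddr_32(0x5000, 0x3 /* present|rw */);
        printf("pte=%#x paddr=%#x flags=%#x\n", e.l1,
               (unsigned)(e.l1 & (PADDR_MASK & PAGE_MASK)), e.l1 & 0xFFFu);
        return 0;
    }
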
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h        Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-x86/page.h        Wed Feb  1 15:28:50 2006
@@ -41,21 +41,21 @@
 #define l4e_get_pfn(x)             \
     ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
 
-/* Get physical address of page mapped by pte (physaddr_t). */
+/* Get physical address of page mapped by pte (paddr_t). */
 #define l1e_get_paddr(x)           \
-    ((physaddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
+    ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
 #define l2e_get_paddr(x)           \
-    ((physaddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
+    ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
 #define l3e_get_paddr(x)           \
-    ((physaddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
+    ((paddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
 #define l4e_get_paddr(x)           \
-    ((physaddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))
-
-/* Get pointer to info structure of page mapped by pte (struct pfn_info *). */
-#define l1e_get_page(x)           (pfn_to_page(l1e_get_pfn(x)))
-#define l2e_get_page(x)           (pfn_to_page(l2e_get_pfn(x)))
-#define l3e_get_page(x)           (pfn_to_page(l3e_get_pfn(x)))
-#define l4e_get_page(x)           (pfn_to_page(l4e_get_pfn(x)))
+    ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))
+
+/* Get pointer to info structure of page mapped by pte (struct page_info *). */
+#define l1e_get_page(x)           (mfn_to_page(l1e_get_pfn(x)))
+#define l2e_get_page(x)           (mfn_to_page(l2e_get_pfn(x)))
+#define l3e_get_page(x)           (mfn_to_page(l3e_get_pfn(x)))
+#define l4e_get_page(x)           (mfn_to_page(l4e_get_pfn(x)))
 
 /* Get pte access flags (unsigned int). */
 #define l1e_get_flags(x)           (get_pte_flags((x).l1))
@@ -81,25 +81,25 @@
 
 /* Construct a pte from a physical address and access flags. */
 #ifndef __ASSEMBLY__
-static inline l1_pgentry_t l1e_from_paddr(physaddr_t pa, unsigned int flags)
+static inline l1_pgentry_t l1e_from_paddr(paddr_t pa, unsigned int flags)
 {
     ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
     return (l1_pgentry_t) { pa | put_pte_flags(flags) };
 }
-static inline l2_pgentry_t l2e_from_paddr(physaddr_t pa, unsigned int flags)
+static inline l2_pgentry_t l2e_from_paddr(paddr_t pa, unsigned int flags)
 {
     ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
     return (l2_pgentry_t) { pa | put_pte_flags(flags) };
 }
 #if CONFIG_PAGING_LEVELS >= 3
-static inline l3_pgentry_t l3e_from_paddr(physaddr_t pa, unsigned int flags)
+static inline l3_pgentry_t l3e_from_paddr(paddr_t pa, unsigned int flags)
 {
     ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
     return (l3_pgentry_t) { pa | put_pte_flags(flags) };
 }
 #endif
 #if CONFIG_PAGING_LEVELS >= 4
-static inline l4_pgentry_t l4e_from_paddr(physaddr_t pa, unsigned int flags)
+static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
 {
     ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
     return (l4_pgentry_t) { pa | put_pte_flags(flags) };
@@ -114,10 +114,10 @@
 #define l4e_from_intpte(intpte)    ((l4_pgentry_t) { (intpte_t)(intpte) })
 
 /* Construct a pte from a page pointer and access flags. */
-#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_pfn(page),(flags)))
-#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_pfn(page),(flags)))
-#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_pfn(page),(flags)))
-#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_pfn(page),(flags)))
+#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_mfn(page),(flags)))
+#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_mfn(page),(flags)))
+#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_mfn(page),(flags)))
+#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_mfn(page),(flags)))
 
 /* Add extra flags to an existing pte. */
 #define l1e_add_flags(x, flags)    ((x).l1 |= put_pte_flags(flags))
@@ -172,7 +172,7 @@
 /* x86_64 */
 typedef struct { u64 pfn; } pagetable_t;
 #endif
-#define pagetable_get_paddr(x) ((physaddr_t)(x).pfn << PAGE_SHIFT)
+#define pagetable_get_paddr(x) ((paddr_t)(x).pfn << PAGE_SHIFT)
 #define pagetable_get_pfn(x)   ((x).pfn)
 #define mk_pagetable(pa)       \
     ({ pagetable_t __p; __p.pfn = (pa) >> PAGE_SHIFT; __p; })
@@ -181,16 +181,31 @@
 #define clear_page(_p)      memset((void *)(_p), 0, PAGE_SIZE)
 #define copy_page(_t,_f)    memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)
 
+#define mfn_valid(mfn)      ((mfn) < max_page)
+
+/* Convert between Xen-heap virtual addresses and machine addresses. */
 #define PAGE_OFFSET         ((unsigned long)__PAGE_OFFSET)
-#define __pa(x)             ((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x)             ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define pfn_to_page(_pfn)   (frame_table + (_pfn))
-#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
-#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
-#define pfn_valid(_pfn)     ((_pfn) < max_page)
-
-#define pfn_to_phys(pfn)    ((physaddr_t)(pfn) << PAGE_SHIFT)
-#define phys_to_pfn(pa)     ((unsigned long)((pa) >> PAGE_SHIFT))
+#define virt_to_maddr(va)   ((unsigned long)(va)-PAGE_OFFSET)
+#define maddr_to_virt(ma)   ((void *)((unsigned long)(ma)+PAGE_OFFSET))
+/* Shorthand versions of the above functions. */
+#define __pa(x)             (virt_to_maddr(x))
+#define __va(x)             (maddr_to_virt(x))
+
+/* Convert between machine frame numbers and page-info structures. */
+#define mfn_to_page(mfn)    (frame_table + (mfn))
+#define page_to_mfn(pg)     ((unsigned long)((pg) - frame_table))
+
+/* Convert between machine addresses and page-info structures. */
+#define maddr_to_page(ma)   (frame_table + ((ma) >> PAGE_SHIFT))
+#define page_to_maddr(pg)   ((paddr_t)((pg) - frame_table) << PAGE_SHIFT)
+
+/* Convert between Xen-heap virtual addresses and page-info structures. */
+#define virt_to_page(va)    (frame_table + (__pa(va) >> PAGE_SHIFT))
+#define page_to_virt(pg)    (maddr_to_virt(page_to_maddr(pg)))
+
+/* Convert between frame number and address formats.  */
+#define pfn_to_paddr(pfn)   ((paddr_t)(pfn) << PAGE_SHIFT)
+#define paddr_to_pfn(pa)    ((unsigned long)((pa) >> PAGE_SHIFT))
 
 /* High table entries are reserved by the hypervisor. */
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
@@ -228,9 +243,9 @@
                      (LINEAR_PT_OFFSET >> (PAGETABLE_ORDER<<2))))
 
 #define linear_pg_table linear_l1_table
-#define linear_l2_table(_ed) ((_ed)->arch.guest_vtable)
-#define linear_l3_table(_ed) ((_ed)->arch.guest_vl3table)
-#define linear_l4_table(_ed) ((_ed)->arch.guest_vl4table)
+#define linear_l2_table(v) ((v)->arch.guest_vtable)
+#define linear_l3_table(v) ((v)->arch.guest_vl3table)
+#define linear_l4_table(v) ((v)->arch.guest_vl4table)
 
 #ifndef __ASSEMBLY__
 #if CONFIG_PAGING_LEVELS == 3
@@ -298,7 +313,7 @@
 
 #ifndef __ASSEMBLY__
 
-static inline int get_order_from_bytes(physaddr_t size)
+static inline int get_order_from_bytes(paddr_t size)
 {
     int order;
     size = (size-1) >> PAGE_SHIFT;
@@ -317,17 +332,17 @@
 }
 
 /* Allocator functions for Xen pagetables. */
-struct pfn_info *alloc_xen_pagetable(void);
-void free_xen_pagetable(struct pfn_info *pg);
+struct page_info *alloc_xen_pagetable(void);
+void free_xen_pagetable(struct page_info *pg);
 l2_pgentry_t *virt_to_xen_l2e(unsigned long v);
 
-/* Map physical page range in Xen virtual address space. */
+/* Map machine page range in Xen virtual address space. */
 #define MAP_SMALL_PAGES (1UL<<16) /* don't use superpages for the mapping */
 int
 map_pages_to_xen(
     unsigned long virt,
-    unsigned long pfn,
-    unsigned long nr_pfns,
+    unsigned long mfn,
+    unsigned long nr_mfns,
     unsigned long flags);
 
 #endif /* !__ASSEMBLY__ */
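
The renamed conversion macros compose into simple round trips between machine
addresses, frame numbers, and page_info pointers. A self-contained toy with a
tiny frame_table and made-up values:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    typedef unsigned long paddr_t;

    struct page_info { unsigned long count_info; };
    static struct page_info frame_table[32];

    #define mfn_to_page(mfn)   (frame_table + (mfn))
    #define page_to_mfn(pg)    ((unsigned long)((pg) - frame_table))
    #define maddr_to_page(ma)  (frame_table + ((ma) >> PAGE_SHIFT))
    #define page_to_maddr(pg)  ((paddr_t)((pg) - frame_table) << PAGE_SHIFT)
    #define pfn_to_paddr(pfn)  ((paddr_t)(pfn) << PAGE_SHIFT)
    #define paddr_to_pfn(pa)   ((unsigned long)((pa) >> PAGE_SHIFT))

    int main(void)
    {
        paddr_t ma = pfn_to_paddr(3) | 0x10;   /* somewhere in frame 3 */
        struct page_info *pg = maddr_to_page(ma);
        printf("mfn=%lu maddr=%#lx pfn=%lu\n",
               page_to_mfn(pg), page_to_maddr(pg), paddr_to_pfn(ma));
        return 0;  /* mfn=3 maddr=0x3000 pfn=3 */
    }
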
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-x86/shadow.h      Wed Feb  1 15:28:50 2006
@@ -133,10 +133,10 @@
 extern void remove_shadow(struct domain *d, unsigned long gpfn, u32 stype);
 
 extern void shadow_l1_normal_pt_update(struct domain *d,
-                                       physaddr_t pa, l1_pgentry_t l1e,
+                                       paddr_t pa, l1_pgentry_t l1e,
                                        struct domain_mmap_cache *cache);
 extern void shadow_l2_normal_pt_update(struct domain *d,
-                                       physaddr_t pa, l2_pgentry_t l2e,
+                                       paddr_t pa, l2_pgentry_t l2e,
                                        struct domain_mmap_cache *cache);
 #if CONFIG_PAGING_LEVELS >= 3
 #include <asm/page-guest32.h>
@@ -150,12 +150,12 @@
 
 extern unsigned long gva_to_gpa(unsigned long gva);
 extern void shadow_l3_normal_pt_update(struct domain *d,
-                                       physaddr_t pa, l3_pgentry_t l3e,
+                                       paddr_t pa, l3_pgentry_t l3e,
                                        struct domain_mmap_cache *cache);
 #endif
 #if CONFIG_PAGING_LEVELS >= 4
 extern void shadow_l4_normal_pt_update(struct domain *d,
-                                       physaddr_t pa, l4_pgentry_t l4e,
+                                       paddr_t pa, l4_pgentry_t l4e,
                                        struct domain_mmap_cache *cache);
 #endif
 extern int shadow_do_update_va_mapping(unsigned long va,
@@ -170,7 +170,7 @@
 static inline void update_hl2e(struct vcpu *v, unsigned long va);
 #endif
 
-static inline int page_is_page_table(struct pfn_info *page)
+static inline int page_is_page_table(struct page_info *page)
 {
     struct domain *owner = page_get_owner(page);
     u32 type_info;
@@ -184,23 +184,23 @@
 
 static inline int mfn_is_page_table(unsigned long mfn)
 {
-    if ( !pfn_valid(mfn) )
+    if ( !mfn_valid(mfn) )
         return 0;
 
-    return page_is_page_table(pfn_to_page(mfn));
-}
-
-static inline int page_out_of_sync(struct pfn_info *page)
+    return page_is_page_table(mfn_to_page(mfn));
+}
+
+static inline int page_out_of_sync(struct page_info *page)
 {
     return page->count_info & PGC_out_of_sync;
 }
 
 static inline int mfn_out_of_sync(unsigned long mfn)
 {
-    if ( !pfn_valid(mfn) )
+    if ( !mfn_valid(mfn) )
         return 0;
 
-    return page_out_of_sync(pfn_to_page(mfn));
+    return page_out_of_sync(mfn_to_page(mfn));
 }
 
 
@@ -283,12 +283,12 @@
 
 /************************************************************************/
 
-#define __mfn_to_gpfn(_d, mfn)                         \
+#define mfn_to_gmfn(_d, mfn)                         \
     ( (shadow_mode_translate(_d))                      \
       ? get_pfn_from_mfn(mfn)                          \
       : (mfn) )
 
-#define __gpfn_to_mfn(_d, gpfn)                        \
+#define gmfn_to_mfn(_d, gpfn)                        \
     ({                                                 \
         unlikely(shadow_mode_translate(_d))            \
         ? (likely(current->domain == (_d))             \
@@ -317,7 +317,7 @@
     unsigned long gpfn;    /* why is this here? */
     unsigned long gmfn;
     unsigned long snapshot_mfn;
-    physaddr_t writable_pl1e; /* NB: this is a machine address */
+    paddr_t writable_pl1e; /* NB: this is a machine address */
     unsigned long va;
 };
 
@@ -401,8 +401,8 @@
     if ( unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
          !(l1e_get_flags(nl1e) & L1_DISALLOW_MASK) &&
          (mfn = l1e_get_pfn(nl1e)) &&
-         pfn_valid(mfn) &&
-         (owner = page_get_owner(pfn_to_page(mfn))) &&
+         mfn_valid(mfn) &&
+         (owner = page_get_owner(mfn_to_page(mfn))) &&
          (d != owner) )
     {
         res = get_page_from_l1e(nl1e, owner);
@@ -432,7 +432,7 @@
 }
 
 static inline void
-shadow_put_page_type(struct domain *d, struct pfn_info *page)
+shadow_put_page_type(struct domain *d, struct page_info *page)
 {
     if ( !shadow_mode_refcounts(d) )
         return;
@@ -441,7 +441,7 @@
 }
 
 static inline int shadow_get_page(struct domain *d,
-                                  struct pfn_info *page,
+                                  struct page_info *page,
                                   struct domain *owner)
 {
     if ( !shadow_mode_refcounts(d) )
@@ -450,7 +450,7 @@
 }
 
 static inline void shadow_put_page(struct domain *d,
-                                   struct pfn_info *page)
+                                   struct page_info *page)
 {
     if ( !shadow_mode_refcounts(d) )
         return;
@@ -493,9 +493,9 @@
         SH_VLOG("mark_dirty OOR! mfn=%lx pfn=%lx max=%x (dom %p)",
                mfn, pfn, d->arch.shadow_dirty_bitmap_size, d);
         SH_VLOG("dom=%p caf=%08x taf=%" PRtype_info, 
-                page_get_owner(pfn_to_page(mfn)),
-                pfn_to_page(mfn)->count_info, 
-                pfn_to_page(mfn)->u.inuse.type_info );
+                page_get_owner(mfn_to_page(mfn)),
+                mfn_to_page(mfn)->count_info, 
+                mfn_to_page(mfn)->u.inuse.type_info );
     }
 #endif
 }
@@ -577,12 +577,12 @@
     if ( (l1e_has_changed(old_hl2e, new_hl2e, PAGE_FLAG_MASK)) )
     {
         if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
-             !shadow_get_page(v->domain, pfn_to_page(l1e_get_pfn(new_hl2e)),
+             !shadow_get_page(v->domain, mfn_to_page(l1e_get_pfn(new_hl2e)),
                               v->domain) )
             new_hl2e = l1e_empty();
         if ( l1e_get_flags(old_hl2e) & _PAGE_PRESENT )
         {
-            shadow_put_page(v->domain, pfn_to_page(l1e_get_pfn(old_hl2e)));
+            shadow_put_page(v->domain, mfn_to_page(l1e_get_pfn(old_hl2e)));
             need_flush = 1;
         }
 
@@ -598,7 +598,7 @@
 }
 
 static inline void shadow_drop_references(
-    struct domain *d, struct pfn_info *page)
+    struct domain *d, struct page_info *page)
 {
     if ( likely(!shadow_mode_refcounts(d)) ||
          ((page->u.inuse.type_info & PGT_count_mask) == 0) )
@@ -606,21 +606,21 @@
 
     /* XXX This needs more thought... */
     printk("%s: needing to call shadow_remove_all_access for mfn=%lx\n",
-           __func__, page_to_pfn(page));
-    printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
+           __func__, page_to_mfn(page));
+    printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_mfn(page),
            page->count_info, page->u.inuse.type_info);
 
     shadow_lock(d);
-    shadow_remove_all_access(d, page_to_pfn(page));
+    shadow_remove_all_access(d, page_to_mfn(page));
     shadow_unlock(d);
 
-    printk("After:  mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page),
+    printk("After:  mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_mfn(page),
            page->count_info, page->u.inuse.type_info);
 }
 
 /* XXX Needs more thought. Neither pretty nor fast: a place holder. */
 static inline void shadow_sync_and_drop_references(
-    struct domain *d, struct pfn_info *page)
+    struct domain *d, struct page_info *page)
 {
     if ( likely(!shadow_mode_refcounts(d)) )
         return;
@@ -628,9 +628,9 @@
     shadow_lock(d);
 
     if ( page_out_of_sync(page) )
-        __shadow_sync_mfn(d, page_to_pfn(page));
-
-    shadow_remove_all_access(d, page_to_pfn(page));
+        __shadow_sync_mfn(d, page_to_mfn(page));
+
+    shadow_remove_all_access(d, page_to_mfn(page));
 
     shadow_unlock(d);
 }
@@ -647,7 +647,7 @@
     domain_mmap_cache_init(&c1);
     domain_mmap_cache_init(&c2);
     shadow_lock(d);
-    shadow_sync_and_drop_references(d, pfn_to_page(mfn));
+    shadow_sync_and_drop_references(d, mfn_to_page(mfn));
     set_p2m_entry(d, gpfn, mfn, &c1, &c2);
     set_pfn_from_mfn(mfn, gpfn);
     shadow_unlock(d);
@@ -666,7 +666,7 @@
     domain_mmap_cache_init(&c1);
     domain_mmap_cache_init(&c2);
     shadow_lock(d);
-    shadow_sync_and_drop_references(d, pfn_to_page(mfn));
+    shadow_sync_and_drop_references(d, mfn_to_page(mfn));
     set_p2m_entry(d, gpfn, -1, &c1, &c2);
     set_pfn_from_mfn(mfn, INVALID_M2P_ENTRY);
     shadow_unlock(d);
@@ -684,22 +684,22 @@
 {
     u32 x, nx;
 
-    ASSERT(pfn_valid(smfn));
-
-    x = pfn_to_page(smfn)->count_info;
+    ASSERT(mfn_valid(smfn));
+
+    x = mfn_to_page(smfn)->count_info;
     nx = x + 1;
 
     if ( unlikely(nx == 0) )
     {
         printk("get_shadow_ref overflow, gmfn=%" PRtype_info  " smfn=%lx\n",
-               pfn_to_page(smfn)->u.inuse.type_info & PGT_mfn_mask,
+               mfn_to_page(smfn)->u.inuse.type_info & PGT_mfn_mask,
                smfn);
         BUG();
     }
     
     // Guarded by the shadow lock...
     //
-    pfn_to_page(smfn)->count_info = nx;
+    mfn_to_page(smfn)->count_info = nx;
 
     return 1;
 }
@@ -714,9 +714,9 @@
 {
     u32 x, nx;
 
-    ASSERT(pfn_valid(smfn));
-
-    x = pfn_to_page(smfn)->count_info;
+    ASSERT(mfn_valid(smfn));
+
+    x = mfn_to_page(smfn)->count_info;
     nx = x - 1;
 
     if ( unlikely(x == 0) )
@@ -724,14 +724,14 @@
         printk("put_shadow_ref underflow, smfn=%lx oc=%08x t=%" 
                PRtype_info "\n",
                smfn,
-               pfn_to_page(smfn)->count_info,
-               pfn_to_page(smfn)->u.inuse.type_info);
+               mfn_to_page(smfn)->count_info,
+               mfn_to_page(smfn)->u.inuse.type_info);
         BUG();
     }
 
     // Guarded by the shadow lock...
     //
-    pfn_to_page(smfn)->count_info = nx;
+    mfn_to_page(smfn)->count_info = nx;
 
     if ( unlikely(nx == 0) )
     {
@@ -742,9 +742,9 @@
 static inline void
 shadow_pin(unsigned long smfn)
 {
-    ASSERT( !(pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) );
-
-    pfn_to_page(smfn)->u.inuse.type_info |= PGT_pinned;
+    ASSERT( !(mfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) );
+
+    mfn_to_page(smfn)->u.inuse.type_info |= PGT_pinned;
     if ( unlikely(!get_shadow_ref(smfn)) )
         BUG();
 }
@@ -752,9 +752,9 @@
 static inline void
 shadow_unpin(unsigned long smfn)
 {
-    ASSERT( (pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) );
-
-    pfn_to_page(smfn)->u.inuse.type_info &= ~PGT_pinned;
+    ASSERT( (mfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) );
+
+    mfn_to_page(smfn)->u.inuse.type_info &= ~PGT_pinned;
     put_shadow_ref(smfn);
 }
 
@@ -770,9 +770,9 @@
 
         ASSERT(shadow_lock_is_acquired(d));
         gmfn = l1e_get_pfn(spte);
-        pfn_to_page(gmfn)->tlbflush_timestamp = smfn;
-        pfn_to_page(gmfn)->u.inuse.type_info &= ~PGT_va_mask;
-        pfn_to_page(gmfn)->u.inuse.type_info |= (unsigned long) index << PGT_va_shift;
+        mfn_to_page(gmfn)->tlbflush_timestamp = smfn;
+        mfn_to_page(gmfn)->u.inuse.type_info &= ~PGT_va_mask;
+        mfn_to_page(gmfn)->u.inuse.type_info |= (unsigned long) index << PGT_va_shift;
     }
 }
 
@@ -790,7 +790,7 @@
     l1_pgentry_t gpte = *gpte_p;
     l1_pgentry_t spte;
     unsigned long gpfn = l1e_get_pfn(gpte);
-    unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
+    unsigned long gmfn = gmfn_to_mfn(d, gpfn);
 
     //printk("l1pte_write_fault gmfn=%lx\n", gmfn);
 
@@ -825,7 +825,7 @@
     l1_pgentry_t gpte = *gpte_p;
     l1_pgentry_t spte = *spte_p;
     unsigned long pfn = l1e_get_pfn(gpte);
-    unsigned long mfn = __gpfn_to_mfn(d, pfn);
+    unsigned long mfn = gmfn_to_mfn(d, pfn);
 
     if ( unlikely(!VALID_MFN(mfn)) )
     {
@@ -862,7 +862,7 @@
 
     if ( ((guest_l1e_get_flags(gpte) & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
           (_PAGE_PRESENT|_PAGE_ACCESSED)) &&
-         VALID_MFN(mfn = __gpfn_to_mfn(d, l1e_get_pfn(gpte))) )
+         VALID_MFN(mfn = gmfn_to_mfn(d, l1e_get_pfn(gpte))) )
     {
         spte = l1e_from_pfn(
             mfn, guest_l1e_get_flags(gpte) & ~(_PAGE_GLOBAL | _PAGE_AVAIL));
@@ -893,7 +893,7 @@
 
     if ( l2e_get_flags(gpde) & _PAGE_PRESENT )
     {
-        mfn = __gpfn_to_mfn(d, pfn);
+        mfn = gmfn_to_mfn(d, pfn);
         if ( VALID_MFN(mfn) && (mfn < max_page) )
             hl2e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
     }
@@ -979,7 +979,7 @@
             //
             perfc_incrc(validate_pte_changes2);
             if ( likely(l1e_get_flags(new_spte) & _PAGE_PRESENT) )
-                shadow_put_page_type(d, pfn_to_page(l1e_get_pfn(new_spte)));
+                shadow_put_page_type(d, mfn_to_page(l1e_get_pfn(new_spte)));
         }
         else if ( ((l1e_get_flags(old_spte) | l1e_get_flags(new_spte)) &
                    _PAGE_PRESENT ) &&
@@ -1035,11 +1035,11 @@
         perfc_incrc(validate_hl2e_changes);
 
         if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
-             !get_page(pfn_to_page(l1e_get_pfn(new_hl2e)), d) )
+             !get_page(mfn_to_page(l1e_get_pfn(new_hl2e)), d) )
             new_hl2e = l1e_empty();
         if ( l1e_get_flags(old_hl2e) & _PAGE_PRESENT )
         {
-            put_page(pfn_to_page(l1e_get_pfn(old_hl2e)));
+            put_page(mfn_to_page(l1e_get_pfn(old_hl2e)));
             need_flush = 1;
         }
     }
@@ -1234,7 +1234,7 @@
     struct domain *d, unsigned long gpfn, unsigned long stype)
 {
     unsigned long gmfn = ((current->domain == d)
-                          ? __gpfn_to_mfn(d, gpfn)
+                          ? gmfn_to_mfn(d, gpfn)
                           : INVALID_MFN);
 
     ASSERT(shadow_lock_is_acquired(d));
@@ -1254,8 +1254,8 @@
             printk("d->id=%d gpfn=%lx gmfn=%lx stype=%lx c=%x t=%" PRtype_info 
" "
                    "mfn_out_of_sync(gmfn)=%d mfn_is_page_table(gmfn)=%d\n",
                    d->domain_id, gpfn, gmfn, stype,
-                   pfn_to_page(gmfn)->count_info,
-                   pfn_to_page(gmfn)->u.inuse.type_info,
+                   mfn_to_page(gmfn)->count_info,
+                   mfn_to_page(gmfn)->u.inuse.type_info,
                    mfn_out_of_sync(gmfn), mfn_is_page_table(gmfn));
             BUG();
         }
@@ -1407,7 +1407,7 @@
  found:
     // release ref to page
     if ( stype != PGT_writable_pred )
-        put_page(pfn_to_page(gmfn));
+        put_page(mfn_to_page(gmfn));
 
     shadow_audit(d, 0);
 }
@@ -1446,7 +1446,7 @@
     //       is given away by the domain?
     //
     if ( stype != PGT_writable_pred )
-        get_page(pfn_to_page(gmfn), d);
+        get_page(mfn_to_page(gmfn), d);
 
     /*
      * STEP 1. If page is already in the table, update it in place.
@@ -1459,7 +1459,7 @@
                 BUG(); // we should never replace entries into the hash table
             x->smfn = smfn;
             if ( stype != PGT_writable_pred )
-                put_page(pfn_to_page(gmfn)); // already had a ref...
+                put_page(mfn_to_page(gmfn)); // already had a ref...
             goto done;
         }
 
@@ -1535,7 +1535,7 @@
 void static inline
 shadow_update_min_max(unsigned long smfn, int index)
 {
-    struct pfn_info *sl1page = pfn_to_page(smfn);
+    struct page_info *sl1page = mfn_to_page(smfn);
     u32 min_max = sl1page->tlbflush_timestamp;
     int min = SHADOW_MIN(min_max);
     int max = SHADOW_MAX(min_max);
@@ -1634,8 +1634,8 @@
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
-    unsigned long mfn = __gpfn_to_mfn(d, gpfn);
-    u32 type = pfn_to_page(mfn)->u.inuse.type_info & PGT_type_mask;
+    unsigned long mfn = gmfn_to_mfn(d, gpfn);
+    u32 type = mfn_to_page(mfn)->u.inuse.type_info & PGT_type_mask;
 
     if ( shadow_mode_refcounts(d) &&
          (type == PGT_writable_page) )
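
mfn_to_gmfn() above is the crux of the naming change: in translate mode it
consults the machine-to-phys table, otherwise it is the identity. A toy model
of the two behaviours, with a tiny made-up m2p table standing in for
get_pfn_from_mfn():

    #include <stdio.h>

    static const unsigned long m2p[8] = { 0, 7, 3, 1, 6, 2, 5, 4 };

    static unsigned long mfn_to_gmfn(int translate_mode, unsigned long mfn)
    {
        return translate_mode ? m2p[mfn] : mfn;  /* identity when not translated */
    }

    int main(void)
    {
        printf("translate: mfn 2 -> gmfn %lu\n", mfn_to_gmfn(1, 2));  /* 3 */
        printf("direct:    mfn 2 -> gmfn %lu\n", mfn_to_gmfn(0, 2));  /* 2 */
        return 0;
    }
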
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-x86/shadow_public.h
--- a/xen/include/asm-x86/shadow_public.h       Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-x86/shadow_public.h       Wed Feb  1 15:28:50 2006
@@ -22,14 +22,14 @@
 #ifndef _XEN_SHADOW_PUBLIC_H
 #define _XEN_SHADOW_PUBLIC_H
 #if CONFIG_PAGING_LEVELS >= 3
-#define MFN_PINNED(_x) (pfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
+#define MFN_PINNED(_x) (mfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
 
 extern int alloc_p2m_table(struct domain *d);
 
 extern void shadow_sync_and_drop_references(
-      struct domain *d, struct pfn_info *page);
+      struct domain *d, struct page_info *page);
 extern void shadow_drop_references(
-      struct domain *d, struct pfn_info *page);
+      struct domain *d, struct page_info *page);
 
 extern int shadow_set_guest_paging_levels(struct domain *d, int levels);
 
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/asm-x86/types.h
--- a/xen/include/asm-x86/types.h       Wed Feb  1 15:01:04 2006
+++ b/xen/include/asm-x86/types.h       Wed Feb  1 15:28:50 2006
@@ -37,17 +37,17 @@
 typedef signed long long s64;
 typedef unsigned long long u64;
 #if defined(CONFIG_X86_PAE)
-typedef u64 physaddr_t;
-#define PRIphysaddr "016llx"
+typedef u64 paddr_t;
+#define PRIpaddr "016llx"
 #else
-typedef unsigned long physaddr_t;
-#define PRIphysaddr "08lx"
+typedef unsigned long paddr_t;
+#define PRIpaddr "08lx"
 #endif
 #elif defined(__x86_64__)
 typedef signed long s64;
 typedef unsigned long u64;
-typedef unsigned long physaddr_t;
-#define PRIphysaddr "016lx"
+typedef unsigned long paddr_t;
+#define PRIpaddr "016lx"
 #endif
 
 typedef unsigned long size_t;
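
PRIpaddr pairs with paddr_t the way PRIx64 pairs with uint64_t, giving a
format string that is correct for whichever width was chosen above. Usage,
taking the non-PAE 32-bit case:

    #include <stdio.h>

    typedef unsigned long paddr_t;   /* non-PAE x86-32 case from the hunk above */
    #define PRIpaddr "08lx"

    int main(void)
    {
        paddr_t pa = 0xb8000;        /* the familiar VGA text buffer */
        printf("pa=%" PRIpaddr "\n", pa);   /* pa=000b8000 */
        return 0;
    }
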
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/xen/domain_page.h
--- a/xen/include/xen/domain_page.h     Wed Feb  1 15:01:04 2006
+++ b/xen/include/xen/domain_page.h     Wed Feb  1 15:28:50 2006
@@ -96,10 +96,10 @@
 
 #else /* !CONFIG_DOMAIN_PAGE */
 
-#define map_domain_page(pfn)                phys_to_virt((pfn)<<PAGE_SHIFT)
+#define map_domain_page(pfn)                maddr_to_virt((pfn)<<PAGE_SHIFT)
 #define unmap_domain_page(va)               ((void)(va))
 
-#define map_domain_page_global(pfn)         phys_to_virt((pfn)<<PAGE_SHIFT)
+#define map_domain_page_global(pfn)         maddr_to_virt((pfn)<<PAGE_SHIFT)
 #define unmap_domain_page_global(va)        ((void)(va))
 
 struct domain_mmap_cache { 
diff -r a12e08eb0209 -r 0c94043f5c5b xen/include/xen/mm.h
--- a/xen/include/xen/mm.h      Wed Feb  1 15:01:04 2006
+++ b/xen/include/xen/mm.h      Wed Feb  1 15:28:50 2006
@@ -1,3 +1,29 @@
+/******************************************************************************
+ * include/xen/mm.h
+ * 
+ * Definitions for memory pages, frame numbers, addresses, allocations, etc.
+ * 
+ * Note that Xen must handle several different physical 'address spaces' and
+ * there is a consistent terminology for these:
+ * 
+ * 1. gpfn/gpaddr: A guest-specific pseudo-physical frame number or address.
+ * 2. gmfn/gmaddr: A machine address from the p.o.v. of a particular guest.
+ * 3. mfn/maddr:   A real machine frame number or address.
+ * 4. pfn/paddr:   Used in 'polymorphic' functions that work across all
+ *                 address spaces, depending on context. See the pagetable
+ *                 conversion macros in asm-x86/page.h for examples.
+ *                 Also 'paddr_t' is big enough to store any physical address.
+ * 
+ * This scheme provides consistent function and variable names even when
+ * different guests are running in different memory-management modes.
+ * 1. A guest running in auto-translated mode (e.g., shadow_mode_translate())
+ *    will have gpfn == gmfn and gmfn != mfn.
+ * 2. A paravirtualised x86 guest will have gpfn != gmfn and gmfn == mfn.
+ * 3. A paravirtualised guest with no pseudophysical overlay will have
+ *    gpfn == gmfn == mfn.
+ * 
+ * Copyright (c) 2002-2006, K A Fraser <keir@xxxxxxxxxxxxx>
+ */
 
 #ifndef __XEN_MM_H__
 #define __XEN_MM_H__
@@ -8,34 +34,34 @@
 #include <xen/spinlock.h>
 
 struct domain;
-struct pfn_info;
+struct page_info;
 
 /* Boot-time allocator. Turns into generic allocator after bootstrap. */
-physaddr_t init_boot_allocator(physaddr_t bitmap_start);
-void init_boot_pages(physaddr_t ps, physaddr_t pe);
+paddr_t init_boot_allocator(paddr_t bitmap_start);
+void init_boot_pages(paddr_t ps, paddr_t pe);
 unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align);
 void end_boot_allocator(void);
 
 /* Generic allocator. These functions are *not* interrupt-safe. */
 void init_heap_pages(
-    unsigned int zone, struct pfn_info *pg, unsigned long nr_pages);
-struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order);
+    unsigned int zone, struct page_info *pg, unsigned long nr_pages);
+struct page_info *alloc_heap_pages(unsigned int zone, unsigned int order);
 void free_heap_pages(
-    unsigned int zone, struct pfn_info *pg, unsigned int order);
+    unsigned int zone, struct page_info *pg, unsigned int order);
 void scrub_heap_pages(void);
 
 /* Xen suballocator. These functions are interrupt-safe. */
-void init_xenheap_pages(physaddr_t ps, physaddr_t pe);
+void init_xenheap_pages(paddr_t ps, paddr_t pe);
 void *alloc_xenheap_pages(unsigned int order);
 void free_xenheap_pages(void *v, unsigned int order);
 #define alloc_xenheap_page() (alloc_xenheap_pages(0))
 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
 
 /* Domain suballocator. These functions are *not* interrupt-safe.*/
-void init_domheap_pages(physaddr_t ps, physaddr_t pe);
-struct pfn_info *alloc_domheap_pages(
+void init_domheap_pages(paddr_t ps, paddr_t pe);
+struct page_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int flags);
-void free_domheap_pages(struct pfn_info *pg, unsigned int order);
+void free_domheap_pages(struct page_info *pg, unsigned int order);
 unsigned long avail_domheap_pages(void);
 #define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
 #define free_domheap_page(p)  (free_domheap_pages(p,0))
