
Re: [Xen-devel] [PATCH] x86/mm: Swap mfn_valid() to use mfn_t



On 16/02/17 20:07, Andrew Cooper wrote:
> Replace one opencoded mfn_eq(), and fix some coding style issues on altered
> lines.  Change __mfn_valid() to return bool, although it can't be updated to
> take mfn_t because of include dependencies.
> 
> No functional change.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
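
For anyone reading the archive who isn't familiar with the typesafe wrappers
this change relies on, the pattern is roughly the sketch below.  This is a
simplified illustration only, not the real headers: the actual mfn_t wrapper
comes from the TYPE_SAFE() machinery in the xen/include headers, and as the
hunk in xen/common/pdx.c shows, the real __mfn_valid() also checks
pfn_hole_mask and the pdx_group_valid bitmap.

    /* Simplified sketch of the typesafe MFN pattern -- not the real headers. */
    #include <stdbool.h>

    typedef struct { unsigned long m; } mfn_t;

    static inline mfn_t _mfn(unsigned long m)    { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t mfn) { return mfn.m; }

    static inline bool mfn_eq(mfn_t x, mfn_t y)
    {
        return mfn_x(x) == mfn_x(y);
    }

    /* Stand-in bound only; the real __mfn_valid() also checks the PDX hole. */
    static unsigned long max_page;

    static inline bool __mfn_valid(unsigned long mfn)
    {
        return mfn < max_page;
    }

    /* After this patch the wrapper unwraps an mfn_t rather than a raw long. */
    #define mfn_valid(mfn) __mfn_valid(mfn_x(mfn))

The struct wrapper is what makes the conversion pay off: passing a raw
unsigned long where an mfn_t is expected now fails to compile, which is why
the diff adds _mfn()/mfn_x() conversions at call sites that still deal in raw
frame numbers.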

> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> CC: Julien Grall <julien.grall@xxxxxxx>
> CC: Tim Deegan <tim@xxxxxxx>
> CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> ---
>  xen/arch/arm/mem_access.c                   |  2 +-
>  xen/arch/arm/mm.c                           |  2 +-
>  xen/arch/arm/p2m.c                          |  6 +++---
>  xen/arch/arm/setup.c                        |  3 ++-
>  xen/arch/x86/cpu/mcheck/mce.c               |  2 +-
>  xen/arch/x86/cpu/mcheck/vmce.c              |  2 +-
>  xen/arch/x86/cpu/vpmu.c                     |  2 +-
>  xen/arch/x86/debug.c                        |  2 +-
>  xen/arch/x86/domctl.c                       |  2 +-
>  xen/arch/x86/hvm/mtrr.c                     |  2 +-
>  xen/arch/x86/mm.c                           | 20 ++++++++++----------
>  xen/arch/x86/mm/hap/guest_walk.c            |  2 +-
>  xen/arch/x86/mm/hap/hap.c                   |  2 --
>  xen/arch/x86/mm/hap/nested_hap.c            |  2 --
>  xen/arch/x86/mm/mem_access.c                |  4 ++--
>  xen/arch/x86/mm/mem_sharing.c               |  4 +---
>  xen/arch/x86/mm/p2m-ept.c                   |  4 ++--
>  xen/arch/x86/mm/p2m-pod.c                   |  2 --
>  xen/arch/x86/mm/p2m-pt.c                    |  2 --
>  xen/arch/x86/mm/p2m.c                       |  2 --
>  xen/arch/x86/mm/paging.c                    |  2 --
>  xen/arch/x86/mm/shadow/private.h            |  2 --
>  xen/arch/x86/tboot.c                        |  4 ++--
>  xen/arch/x86/x86_64/mm.c                    | 16 ++++++++--------
>  xen/arch/x86/x86_64/traps.c                 | 14 +++++++-------
>  xen/common/grant_table.c                    | 10 +++++-----
>  xen/common/memory.c                         |  8 ++++----
>  xen/common/page_alloc.c                     | 12 ++++++------
>  xen/common/pdx.c                            |  2 +-
>  xen/drivers/passthrough/amd/iommu_guest.c   | 10 +++++-----
>  xen/drivers/passthrough/amd/pci_amd_iommu.c |  2 +-
>  xen/drivers/passthrough/vtd/dmar.c          |  2 +-
>  xen/drivers/passthrough/vtd/x86/vtd.c       |  2 +-
>  xen/include/asm-arm/mm.h                    |  4 ++--
>  xen/include/asm-arm/p2m.h                   |  2 +-
>  xen/include/asm-x86/p2m.h                   |  2 +-
>  xen/include/asm-x86/page.h                  |  2 +-
>  xen/include/xen/pdx.h                       |  2 +-
>  xen/include/xen/tmem_xen.h                  |  2 +-
>  39 files changed, 77 insertions(+), 92 deletions(-)
> 
> diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
> index 03b20c4..04b1506 100644
> --- a/xen/arch/arm/mem_access.c
> +++ b/xen/arch/arm/mem_access.c
> @@ -172,7 +172,7 @@ p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
>      if ( mfn_eq(mfn, INVALID_MFN) )
>          goto err;
>  
> -    if ( !mfn_valid(mfn_x(mfn)) )
> +    if ( !mfn_valid(mfn) )
>          goto err;
>  
>      /*
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 2d96423..f0a2edd 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -1350,7 +1350,7 @@ int replace_grant_host_mapping(unsigned long addr, unsigned long mfn,
>  
>  bool is_iomem_page(mfn_t mfn)
>  {
> -    return !mfn_valid(mfn_x(mfn));
> +    return !mfn_valid(mfn);
>  }
>  
>  void clear_and_clean_page(struct page_info *page)
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 5e8f6cd..e36d075 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -648,7 +648,7 @@ static void p2m_put_l3_page(const lpae_t pte)
>      {
>          unsigned long mfn = pte.p2m.base;
>  
> -        ASSERT(mfn_valid(mfn));
> +        ASSERT(mfn_valid(_mfn(mfn)));
>          put_page(mfn_to_page(mfn));
>      }
>  }
> @@ -695,7 +695,7 @@ static void p2m_free_entry(struct p2m_domain *p2m,
>          p2m_flush_tlb_sync(p2m);
>  
>      mfn = _mfn(entry.p2m.base);
> -    ASSERT(mfn_valid(mfn_x(mfn)));
> +    ASSERT(mfn_valid(mfn));
>  
>      free_domheap_page(mfn_to_page(mfn_x(mfn)));
>  }
> @@ -1412,7 +1412,7 @@ struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
>      if ( rc )
>          goto err;
>  
> -    if ( !mfn_valid(maddr >> PAGE_SHIFT) )
> +    if ( !mfn_valid(_mfn(maddr >> PAGE_SHIFT)) )
>          goto err;
>  
>      page = mfn_to_page(maddr >> PAGE_SHIFT);
> diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
> index 2bf4363..b25ad80 100644
> --- a/xen/arch/arm/setup.c
> +++ b/xen/arch/arm/setup.c
> @@ -268,7 +268,8 @@ void __init discard_initial_modules(void)
>          if ( mi->module[i].kind == BOOTMOD_XEN )
>              continue;
>  
> -        if ( !mfn_valid(paddr_to_pfn(s)) || !mfn_valid(paddr_to_pfn(e)))
> +        if ( !mfn_valid(_mfn(paddr_to_pfn(s))) ||
> +             !mfn_valid(_mfn(paddr_to_pfn(e))))
>              continue;
>  
>          dt_unreserved_regions(s, e, init_domheap_pages, 0);
> diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
> index 894db5c..8061c2b 100644
> --- a/xen/arch/x86/cpu/mcheck/mce.c
> +++ b/xen/arch/x86/cpu/mcheck/mce.c
> @@ -226,7 +226,7 @@ static void mca_init_bank(enum mca_source who,
>          (mib->mc_status & MCi_STATUS_ADDRV) &&
>          (mc_check_addr(mib->mc_status, mib->mc_misc, MC_ADDR_PHYSICAL)) &&
>          (who == MCA_POLLER || who == MCA_CMCI_HANDLER) &&
> -        (mfn_valid(paddr_to_pfn(mib->mc_addr))))
> +        (mfn_valid(_mfn(paddr_to_pfn(mib->mc_addr)))))
>      {
>          struct domain *d;
>  
> diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
> index 5f002e3..e69eeaa 100644
> --- a/xen/arch/x86/cpu/mcheck/vmce.c
> +++ b/xen/arch/x86/cpu/mcheck/vmce.c
> @@ -436,7 +436,7 @@ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
>      if ( is_hardware_domain(d) )
>          return 0;
>  
> -    if (!mfn_valid(mfn_x(mfn)))
> +    if ( !mfn_valid(mfn) )
>          return -EINVAL;
>  
>      if ( !has_hvm_container_domain(d) || !paging_mode_hap(d) )
> diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
> index b271c2c..c8615e8 100644
> --- a/xen/arch/x86/cpu/vpmu.c
> +++ b/xen/arch/x86/cpu/vpmu.c
> @@ -634,7 +634,7 @@ static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
>      if ( xenpmu_data )
>      {
>          mfn = domain_page_map_to_mfn(xenpmu_data);
> -        ASSERT(mfn_valid(mfn));
> +        ASSERT(mfn_valid(_mfn(mfn)));
>          unmap_domain_page_global(xenpmu_data);
>          put_page_and_type(mfn_to_page(mfn));
>      }
> diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
> index 13d4ad7..499574e 100644
> --- a/xen/arch/x86/debug.c
> +++ b/xen/arch/x86/debug.c
> @@ -150,7 +150,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
>      DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%#"PRI_mfn"\n", l1t, l1_table_offset(vaddr),
>            l1e, mfn_x(mfn));
>  
> -    return mfn_valid(mfn_x(mfn)) ? mfn : INVALID_MFN;
> +    return mfn_valid(mfn) ? mfn : INVALID_MFN;
>  }
>  
>  /* Returns: number of bytes remaining to be copied */
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index 3b5c3c9..364283e 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -1250,7 +1250,7 @@ long arch_do_domctl(
>          unsigned long pfn = domctl->u.set_broken_page_p2m.pfn;
>          mfn_t mfn = get_gfn_query(d, pfn, &pt);
>  
> -        if ( unlikely(!mfn_valid(mfn_x(mfn))) || unlikely(!p2m_is_ram(pt)) )
> +        if ( unlikely(!mfn_valid(mfn)) || unlikely(!p2m_is_ram(pt)) )
>              ret = -EINVAL;
>          else
>              ret = p2m_change_type_one(d, pfn, pt, p2m_ram_broken);
> diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
> index 86c71be..c5c27cb 100644
> --- a/xen/arch/x86/hvm/mtrr.c
> +++ b/xen/arch/x86/hvm/mtrr.c
> @@ -792,7 +792,7 @@ int epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
>          return MTRR_TYPE_WRBACK;
>      }
>  
> -    if ( !mfn_valid(mfn_x(mfn)) )
> +    if ( !mfn_valid(mfn) )
>      {
>          *ipat = 1;
>          return MTRR_TYPE_UNCACHABLE;
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 75bdbc3..cfa88b0 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -345,7 +345,7 @@ void __init arch_init_memory(void)
>          /* Mark as I/O up to next RAM region. */
>          for ( ; pfn < rstart_pfn; pfn++ )
>          {
> -            if ( !mfn_valid(pfn) )
> +            if ( !mfn_valid(_mfn(pfn)) )
>                  continue;
>              share_xen_page_with_guest(
>                  mfn_to_page(pfn), dom_io, XENSHARE_writable);
> @@ -689,7 +689,7 @@ static int get_page_from_pagenr(unsigned long page_nr, struct domain *d)
>  {
>      struct page_info *page = mfn_to_page(page_nr);
>  
> -    if ( unlikely(!mfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
> +    if ( unlikely(!mfn_valid(_mfn(page_nr))) || unlikely(!get_page(page, d)) )
>      {
>          MEM_LOG("Could not get page ref for pfn %lx", page_nr);
>          return 0;
> @@ -792,7 +792,7 @@ bool is_iomem_page(mfn_t mfn)
>  {
>      struct page_info *page;
>  
> -    if ( !mfn_valid(mfn_x(mfn)) )
> +    if ( !mfn_valid(mfn) )
>          return true;
>  
>      /* Caller must know that it is an iomem page, or a reference is held. */
> @@ -880,13 +880,13 @@ get_page_from_l1e(
>          return -EINVAL;
>      }
>  
> -    if ( !mfn_valid(mfn) ||
> +    if ( !mfn_valid(_mfn(mfn)) ||
>           (real_pg_owner = page_get_owner_and_reference(page)) == dom_io )
>      {
>          int flip = 0;
>  
>          /* Only needed the reference to confirm dom_io ownership. */
> -        if ( mfn_valid(mfn) )
> +        if ( mfn_valid(_mfn(mfn)) )
>              put_page(page);
>  
>          /* DOMID_IO reverts to caller for privilege checks. */
> @@ -2764,7 +2764,7 @@ int get_superpage(unsigned long mfn, struct domain *d)
>  
>      ASSERT(opt_allow_superpage);
>  
> -    if ( !mfn_valid(mfn | (L1_PAGETABLE_ENTRIES - 1)) )
> +    if ( !mfn_valid(_mfn(mfn | (L1_PAGETABLE_ENTRIES - 1))) )
>          return -EINVAL;
>  
>      spage = mfn_to_spage(mfn);
> @@ -3579,7 +3579,7 @@ long do_mmuext_op(
>                  MEM_LOG("Unaligned superpage reference mfn %lx", mfn);
>                  rc = -EINVAL;
>              }
> -            else if ( !mfn_valid(mfn | (L1_PAGETABLE_ENTRIES - 1)) )
> +            else if ( !mfn_valid(_mfn(mfn | (L1_PAGETABLE_ENTRIES - 1))) )
>                  rc = -EINVAL;
>              else if ( op.cmd == MMUEXT_MARK_SUPER )
>                  rc = mark_superpage(mfn_to_spage(mfn), d);
> @@ -4865,7 +4865,7 @@ int xenmem_add_to_physmap_one(
>  
>      /* Remove previously mapped page if it was present. */
>      prev_mfn = mfn_x(get_gfn(d, gfn_x(gpfn), &p2mt));
> -    if ( mfn_valid(prev_mfn) )
> +    if ( mfn_valid(_mfn(prev_mfn)) )
>      {
>          if ( is_xen_heap_mfn(prev_mfn) )
>              /* Xen heap frames are simply unhooked from this phys slot. */
> @@ -5215,7 +5215,7 @@ static int ptwr_emulated_update(
>  
>      /* We are looking only for read-only mappings of p.t. pages. */
>      ASSERT((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) == _PAGE_PRESENT);
> -    ASSERT(mfn_valid(mfn));
> +    ASSERT(mfn_valid(_mfn(mfn)));
>      ASSERT((page->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table);
>      ASSERT((page->u.inuse.type_info & PGT_count_mask) != 0);
>      ASSERT(page_get_owner(page) == d);
> @@ -5539,7 +5539,7 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
>          return 0;
>  
>      mfn = l1e_get_pfn(pte);
> -    if ( mfn_valid(mfn) )
> +    if ( mfn_valid(_mfn(mfn)) )
>      {
>          struct page_info *page = mfn_to_page(mfn);
>          struct domain *owner = page_get_owner_and_reference(page);
> diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c
> index b38b851..569a495 100644
> --- a/xen/arch/x86/mm/hap/guest_walk.c
> +++ b/xen/arch/x86/mm/hap/guest_walk.c
> @@ -86,7 +86,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
>      top_mfn = _mfn(page_to_mfn(top_page));
>  
>      /* Map the top-level table and call the tree-walker */
> -    ASSERT(mfn_valid(mfn_x(top_mfn)));
> +    ASSERT(mfn_valid(top_mfn));
>      top_map = map_domain_page(top_mfn);
>  #if GUEST_PAGING_LEVELS == 3
>      top_map += (cr3 & ~(PAGE_MASK | 31));
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index b5870bf..9e58393 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -45,8 +45,6 @@
>  /* Override macros from asm/page.h to make them work with mfn_t */
>  #undef mfn_to_page
>  #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef mfn_valid
> -#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
>  #undef page_to_mfn
>  #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
>  
> diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
> index d41bb09..162afed 100644
> --- a/xen/arch/x86/mm/hap/nested_hap.c
> +++ b/xen/arch/x86/mm/hap/nested_hap.c
> @@ -71,8 +71,6 @@
>  /*        NESTED VIRT P2M FUNCTIONS         */
>  /********************************************/
>  /* Override macros from asm/page.h to make them work with mfn_t */
> -#undef mfn_valid
> -#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
>  #undef page_to_mfn
>  #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
>  
> diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
> index 34a994d..3ebeb4f 100644
> --- a/xen/arch/x86/mm/mem_access.c
> +++ b/xen/arch/x86/mm/mem_access.c
> @@ -196,14 +196,14 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
>      mfn = ap2m->get_entry(ap2m, gfn_l, &t, &old_a, 0, NULL, NULL);
>  
>      /* Check host p2m if no valid entry in alternate */
> -    if ( !mfn_valid(mfn_x(mfn)) )
> +    if ( !mfn_valid(mfn) )
>      {
>  
>          mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
>                                      P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
>  
>          rc = -ESRCH;
> -        if ( !mfn_valid(mfn_x(mfn)) || t != p2m_ram_rw )
> +        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
>              return rc;
>  
>          /* If this is a superpage, copy that first */
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index db7f389..84fce18 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -154,8 +154,6 @@ static inline shr_handle_t get_next_handle(void)
>  
>  #undef mfn_to_page
>  #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef mfn_valid
> -#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
>  #undef page_to_mfn
>  #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
>  
> @@ -827,7 +825,7 @@ static int nominate_page(struct domain *d, gfn_t gfn,
>                  continue;
>  
>              amfn = get_gfn_type_access(ap2m, gfn_x(gfn), &ap2mt, &ap2ma, 0, NULL);
> -            if ( mfn_valid(amfn) && (mfn_x(amfn) != mfn_x(mfn) || ap2ma != p2ma) )
> +            if ( mfn_valid(amfn) && (!mfn_eq(amfn, mfn) || ap2ma != p2ma) )
>              {
>                  altp2m_list_unlock(d);
>                  goto out;
> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index ea614b0..568944f 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -70,7 +70,7 @@ static int atomic_write_ept_entry(ept_entry_t *entryptr, ept_entry_t new,
>          {
>              struct domain *fdom;
>  
> -            if ( !mfn_valid(new.mfn) )
> +            if ( !mfn_valid(_mfn(new.mfn)) )
>                  goto out;
>  
>              rc = -ESRCH;
> @@ -776,7 +776,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
>          ept_entry = table + (gfn_remainder >> (i * EPT_TABLE_ORDER));
>      }
>  
> -    if ( mfn_valid(mfn_x(mfn)) || p2m_allows_invalid_mfn(p2mt) )
> +    if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
>      {
>          int emt = epte_get_entry_emt(p2m->domain, gfn, mfn,
>                                       i * EPT_TABLE_ORDER, &ipat, direct_mmio);
> diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
> index 367ee00..d5fea72 100644
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -37,8 +37,6 @@
>  /* Override macros from asm/page.h to make them work with mfn_t */
>  #undef mfn_to_page
>  #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef mfn_valid
> -#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
>  #undef page_to_mfn
>  #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
>  
> diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
> index a23d0bd..bbfa54e 100644
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -42,8 +42,6 @@
>  /* Override macros from asm/page.h to make them work with mfn_t */
>  #undef mfn_to_page
>  #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef mfn_valid
> -#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
>  #undef page_to_mfn
>  #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
>  
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 0c1820e..b53cee3 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -50,8 +50,6 @@ boolean_param("hap_2mb", opt_hap_2mb);
>  /* Override macros from asm/page.h to make them work with mfn_t */
>  #undef mfn_to_page
>  #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef mfn_valid
> -#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
>  #undef page_to_mfn
>  #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
>  
> diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
> index d964ed5..97e2780 100644
> --- a/xen/arch/x86/mm/paging.c
> +++ b/xen/arch/x86/mm/paging.c
> @@ -50,8 +50,6 @@ DEFINE_PER_CPU(int, mm_lock_level);
>  /* Override macros from asm/page.h to make them work with mfn_t */
>  #undef mfn_to_page
>  #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef mfn_valid
> -#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
>  #undef page_to_mfn
>  #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
>  
> diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
> index f0b0ed4..472676c 100644
> --- a/xen/arch/x86/mm/shadow/private.h
> +++ b/xen/arch/x86/mm/shadow/private.h
> @@ -469,8 +469,6 @@ void sh_reset_l3_up_pointers(struct vcpu *v);
>  /* Override macros from asm/page.h to make them work with mfn_t */
>  #undef mfn_to_page
>  #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
> -#undef mfn_valid
> -#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
>  #undef page_to_mfn
>  #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
>  
> diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
> index 4a573b0..eac182d 100644
> --- a/xen/arch/x86/tboot.c
> +++ b/xen/arch/x86/tboot.c
> @@ -187,7 +187,7 @@ static void update_pagetable_mac(vmac_ctx_t *ctx)
>      {
>          struct page_info *page = mfn_to_page(mfn);
>  
> -        if ( !mfn_valid(mfn) )
> +        if ( !mfn_valid(_mfn(mfn)) )
>              continue;
>          if ( is_page_in_use(page) && !is_xen_heap_page(page) )
>          {
> @@ -279,7 +279,7 @@ static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE],
>      {
>          struct page_info *page = __mfn_to_page(mfn);
>  
> -        if ( !mfn_valid(mfn) )
> +        if ( !mfn_valid(_mfn(mfn)) )
>              continue;
>          if ( (mfn << PAGE_SHIFT) < __pa(&_end) )
>              continue; /* skip Xen */
> diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
> index 9ead02e..68c2a9e 100644
> --- a/xen/arch/x86/x86_64/mm.c
> +++ b/xen/arch/x86/x86_64/mm.c
> @@ -65,7 +65,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
>      l3e = l3t[l3_table_offset(addr)];
>      unmap_domain_page(l3t);
>      mfn = l3e_get_pfn(l3e);
> -    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
> +    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) )
>          return NULL;
>      if ( (l3e_get_flags(l3e) & _PAGE_PSE) )
>      {
> @@ -77,7 +77,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
>      l2e = l2t[l2_table_offset(addr)];
>      unmap_domain_page(l2t);
>      mfn = l2e_get_pfn(l2e);
> -    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
> +    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) )
>          return NULL;
>      if ( (l2e_get_flags(l2e) & _PAGE_PSE) )
>      {
> @@ -89,7 +89,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
>      l1e = l1t[l1_table_offset(addr)];
>      unmap_domain_page(l1t);
>      mfn = l1e_get_pfn(l1e);
> -    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
> +    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) )
>          return NULL;
>  
>   ret:
> @@ -366,7 +366,7 @@ static int setup_compat_m2p_table(struct mem_hotadd_info *info)
>              continue;
>  
>          for ( n = 0; n < CNT; ++n)
> -            if ( mfn_valid(i + n * PDX_GROUP_COUNT) )
> +            if ( mfn_valid(_mfn(i + n * PDX_GROUP_COUNT)) )
>                  break;
>          if ( n == CNT )
>              continue;
> @@ -436,7 +436,7 @@ static int setup_m2p_table(struct mem_hotadd_info *info)
>          va = RO_MPT_VIRT_START + i * sizeof(*machine_to_phys_mapping);
>  
>          for ( n = 0; n < CNT; ++n)
> -            if ( mfn_valid(i + n * PDX_GROUP_COUNT) )
> +            if ( mfn_valid(_mfn(i + n * PDX_GROUP_COUNT)) )
>                  break;
>          if ( n < CNT )
>          {
> @@ -554,7 +554,7 @@ void __init paging_init(void)
>              for ( holes = k = 0; k < 1 << PAGETABLE_ORDER; ++k)
>              {
>                  for ( n = 0; n < CNT; ++n)
> -                    if ( mfn_valid(MFN(i + k) + n * PDX_GROUP_COUNT) )
> +                    if ( mfn_valid(_mfn(MFN(i + k) + n * PDX_GROUP_COUNT)) )
>                          break;
>                  if ( n == CNT )
>                      ++holes;
> @@ -587,7 +587,7 @@ void __init paging_init(void)
>          }
>  
>          for ( n = 0; n < CNT; ++n)
> -            if ( mfn_valid(MFN(i) + n * PDX_GROUP_COUNT) )
> +            if ( mfn_valid(_mfn(MFN(i) + n * PDX_GROUP_COUNT)) )
>                  break;
>          if ( n == CNT )
>              l1_pg = NULL;
> @@ -653,7 +653,7 @@ void __init paging_init(void)
>          memflags = MEMF_node(phys_to_nid(i <<
>              (L2_PAGETABLE_SHIFT - 2 + PAGE_SHIFT)));
>          for ( n = 0; n < CNT; ++n)
> -            if ( mfn_valid(MFN(i) + n * PDX_GROUP_COUNT) )
> +            if ( mfn_valid(_mfn(MFN(i) + n * PDX_GROUP_COUNT)) )
>                  break;
>          if ( n == CNT )
>              continue;
> diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
> index 8d5f901..eab7758 100644
> --- a/xen/arch/x86/x86_64/traps.c
> +++ b/xen/arch/x86/x86_64/traps.c
> @@ -179,47 +179,47 @@ void show_page_walk(unsigned long addr)
>      l4e = l4t[l4_table_offset(addr)];
>      unmap_domain_page(l4t);
>      mfn = l4e_get_pfn(l4e);
> -    pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
> +    pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?
>            get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
>      printk(" L4[0x%03lx] = %"PRIpte" %016lx\n",
>             l4_table_offset(addr), l4e_get_intpte(l4e), pfn);
>      if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) ||
> -         !mfn_valid(mfn) )
> +         !mfn_valid(_mfn(mfn)) )
>          return;
>  
>      l3t = map_domain_page(_mfn(mfn));
>      l3e = l3t[l3_table_offset(addr)];
>      unmap_domain_page(l3t);
>      mfn = l3e_get_pfn(l3e);
> -    pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
> +    pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?
>            get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
>      printk(" L3[0x%03lx] = %"PRIpte" %016lx%s\n",
>             l3_table_offset(addr), l3e_get_intpte(l3e), pfn,
>             (l3e_get_flags(l3e) & _PAGE_PSE) ? " (PSE)" : "");
>      if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
>           (l3e_get_flags(l3e) & _PAGE_PSE) ||
> -         !mfn_valid(mfn) )
> +         !mfn_valid(_mfn(mfn)) )
>          return;
>  
>      l2t = map_domain_page(_mfn(mfn));
>      l2e = l2t[l2_table_offset(addr)];
>      unmap_domain_page(l2t);
>      mfn = l2e_get_pfn(l2e);
> -    pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
> +    pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?
>            get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
>      printk(" L2[0x%03lx] = %"PRIpte" %016lx %s\n",
>             l2_table_offset(addr), l2e_get_intpte(l2e), pfn,
>             (l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : "");
>      if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
>           (l2e_get_flags(l2e) & _PAGE_PSE) ||
> -         !mfn_valid(mfn) )
> +         !mfn_valid(_mfn(mfn)) )
>          return;
>  
>      l1t = map_domain_page(_mfn(mfn));
>      l1e = l1t[l1_table_offset(addr)];
>      unmap_domain_page(l1t);
>      mfn = l1e_get_pfn(l1e);
> -    pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
> +    pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?
>            get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
>      printk(" L1[0x%03lx] = %"PRIpte" %016lx\n",
>             l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index d3ea805..bce13e8 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -257,7 +257,7 @@ static int __get_paged_frame(unsigned long gfn, unsigned long *frame, struct pag
>      *frame = page_to_mfn(*page);
>  #else
>      *frame = mfn_x(gfn_to_mfn(rd, _gfn(gfn)));
> -    *page = mfn_valid(*frame) ? mfn_to_page(*frame) : NULL;
> +    *page = mfn_valid(_mfn(*frame)) ? mfn_to_page(*frame) : NULL;
>      if ( (!(*page)) || (!get_page(*page, rd)) )
>      {
>          *frame = mfn_x(INVALID_MFN);
> @@ -878,7 +878,7 @@ __gnttab_map_grant_ref(
>      /* pg may be set, with a refcount included, from __get_paged_frame */
>      if ( !pg )
>      {
> -        pg = mfn_valid(frame) ? mfn_to_page(frame) : NULL;
> +        pg = mfn_valid(_mfn(frame)) ? mfn_to_page(frame) : NULL;
>          if ( pg )
>              owner = page_get_owner_and_reference(pg);
>      }
> @@ -1792,7 +1792,7 @@ gnttab_transfer(
>  #endif
>  
>          /* Check the passed page frame for basic validity. */
> -        if ( unlikely(!mfn_valid(mfn)) )
> +        if ( unlikely(!mfn_valid(_mfn(mfn))) )
>          { 
>              put_gfn(d, gop.mfn);
>              gdprintk(XENLOG_INFO, "gnttab_transfer: out-of-range %lx\n",
> @@ -2256,7 +2256,7 @@ __acquire_grant_for_copy(
>      }
>      else
>      {
> -        ASSERT(mfn_valid(act->frame));
> +        ASSERT(mfn_valid(_mfn(act->frame)));
>          *page = mfn_to_page(act->frame);
>          td = page_get_owner_and_reference(*page);
>          /*
> @@ -2935,7 +2935,7 @@ static int __gnttab_cache_flush(gnttab_cache_flush_t *cflush,
>      d = rcu_lock_current_domain();
>      mfn = cflush->a.dev_bus_addr >> PAGE_SHIFT;
>  
> -    if ( !mfn_valid(mfn) )
> +    if ( !mfn_valid(_mfn(mfn)) )
>      {
>          rcu_unlock_domain(d);
>          return -EINVAL;
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index ad29b7b..ad0b33c 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -191,7 +191,7 @@ static void populate_physmap(struct memop_args *a)
>  
>                  for ( j = 0; j < (1U << a->extent_order); j++, mfn++ )
>                  {
> -                    if ( !mfn_valid(mfn) )
> +                    if ( !mfn_valid(_mfn(mfn)) )
>                      {
>                          gdprintk(XENLOG_INFO, "Invalid mfn %#"PRI_xen_pfn"\n",
>                                   mfn);
> @@ -274,7 +274,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
>           * actual page that needs to be released. */
>          if ( p2mt == p2m_ram_paging_out )
>          {
> -            ASSERT(mfn_valid(mfn_x(mfn)));
> +            ASSERT(mfn_valid(mfn));
>              page = mfn_to_page(mfn_x(mfn));
>              if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
>                  put_page(page);
> @@ -291,7 +291,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
>  #else
>      mfn = gfn_to_mfn(d, _gfn(gmfn));
>  #endif
> -    if ( unlikely(!mfn_valid(mfn_x(mfn))) )
> +    if ( unlikely(!mfn_valid(mfn)) )
>      {
>          put_gfn(d, gmfn);
>          gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
> @@ -515,7 +515,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
>  #else /* !CONFIG_X86 */
>                  mfn = mfn_x(gfn_to_mfn(d, _gfn(gmfn + k)));
>  #endif
> -                if ( unlikely(!mfn_valid(mfn)) )
> +                if ( unlikely(!mfn_valid(_mfn(mfn))) )
>                  {
>                      put_gfn(d, gmfn + k);
>                      rc = -EINVAL;
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index bbd7bc6..530ede1 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -985,7 +985,7 @@ static void free_heap_pages(
>          if ( (page_to_mfn(pg) & mask) )
>          {
>              /* Merge with predecessor block? */
> -            if ( !mfn_valid(page_to_mfn(pg-mask)) ||
> +            if ( !mfn_valid(_mfn(page_to_mfn(pg-mask))) ||
>                   !page_state_is(pg-mask, free) ||
>                   (PFN_ORDER(pg-mask) != order) ||
>                   (phys_to_nid(page_to_maddr(pg-mask)) != node) )
> @@ -996,7 +996,7 @@ static void free_heap_pages(
>          else
>          {
>              /* Merge with successor block? */
> -            if ( !mfn_valid(page_to_mfn(pg+mask)) ||
> +            if ( !mfn_valid(_mfn(page_to_mfn(pg+mask))) ||
>                   !page_state_is(pg+mask, free) ||
>                   (PFN_ORDER(pg+mask) != order) ||
>                   (phys_to_nid(page_to_maddr(pg+mask)) != node) )
> @@ -1082,7 +1082,7 @@ int offline_page(unsigned long mfn, int broken, uint32_t *status)
>      struct domain *owner;
>      struct page_info *pg;
>  
> -    if ( !mfn_valid(mfn) )
> +    if ( !mfn_valid(_mfn(mfn)) )
>      {
>          dprintk(XENLOG_WARNING,
>                  "try to offline page out of range %lx\n", mfn);
> @@ -1191,7 +1191,7 @@ unsigned int online_page(unsigned long mfn, uint32_t *status)
>      struct page_info *pg;
>      int ret;
>  
> -    if ( !mfn_valid(mfn) )
> +    if ( !mfn_valid(_mfn(mfn)) )
>      {
>          dprintk(XENLOG_WARNING, "call expand_pages() first\n");
>          return -EINVAL;
> @@ -1242,7 +1242,7 @@ int query_page_offline(unsigned long mfn, uint32_t *status)
>  {
>      struct page_info *pg;
>  
> -    if ( !mfn_valid(mfn) || !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) )
> +    if ( !mfn_valid(_mfn(mfn)) || !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) )
>      {
>          dprintk(XENLOG_WARNING, "call expand_pages() first\n");
>          return -EINVAL;
> @@ -1412,7 +1412,7 @@ static void __init smp_scrub_heap_pages(void *data)
>          pg = mfn_to_page(mfn);
>  
>          /* Check the mfn is valid and page is free. */
> -        if ( !mfn_valid(mfn) || !page_state_is(pg, free) )
> +        if ( !mfn_valid(_mfn(mfn)) || !page_state_is(pg, free) )
>              continue;
>  
>          scrub_one_page(pg);
> diff --git a/xen/common/pdx.c b/xen/common/pdx.c
> index 4baffd4..c991a09 100644
> --- a/xen/common/pdx.c
> +++ b/xen/common/pdx.c
> @@ -31,7 +31,7 @@ unsigned int __read_mostly pfn_pdx_hole_shift = 0;
>  unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS(
>      (FRAMETABLE_NR + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT)] = { [0] = 1 };
>  
> -int __mfn_valid(unsigned long mfn)
> +bool __mfn_valid(unsigned long mfn)
>  {
>      return likely(mfn < max_page) &&
>             likely(!(mfn & pfn_hole_mask)) &&
> diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
> index f96fbf4..96175bb 100644
> --- a/xen/drivers/passthrough/amd/iommu_guest.c
> +++ b/xen/drivers/passthrough/amd/iommu_guest.c
> @@ -199,7 +199,7 @@ void guest_iommu_add_ppr_log(struct domain *d, u32 entry[])
>  
>      mfn = guest_iommu_get_table_mfn(d, reg_to_u64(iommu->ppr_log.reg_base),
>                                      sizeof(ppr_entry_t), tail);
> -    ASSERT(mfn_valid(mfn));
> +    ASSERT(mfn_valid(_mfn(mfn)));
>  
>      log_base = map_domain_page(_mfn(mfn));
>      log = log_base + tail % (PAGE_SIZE / sizeof(ppr_entry_t));
> @@ -248,7 +248,7 @@ void guest_iommu_add_event_log(struct domain *d, u32 entry[])
>  
>      mfn = guest_iommu_get_table_mfn(d, reg_to_u64(iommu->event_log.reg_base),
>                                      sizeof(event_entry_t), tail);
> -    ASSERT(mfn_valid(mfn));
> +    ASSERT(mfn_valid(_mfn(mfn)));
>  
>      log_base = map_domain_page(_mfn(mfn));
>      log = log_base + tail % (PAGE_SIZE / sizeof(event_entry_t));
> @@ -420,7 +420,7 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
>      dte_mfn = guest_iommu_get_table_mfn(d,
>                                          reg_to_u64(g_iommu->dev_table.reg_base),
>                                          sizeof(dev_entry_t), gbdf);
> -    ASSERT(mfn_valid(dte_mfn));
> +    ASSERT(mfn_valid(_mfn(dte_mfn)));
>  
>      /* Read guest dte information */
>      dte_base = map_domain_page(_mfn(dte_mfn));
> @@ -441,7 +441,7 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
>      gcr3_mfn = mfn_x(get_gfn(d, gcr3_gfn, &p2mt));
>      put_gfn(d, gcr3_gfn);
>  
> -    ASSERT(mfn_valid(gcr3_mfn));
> +    ASSERT(mfn_valid(_mfn(gcr3_mfn)));
>  
>      iommu = find_iommu_for_device(0, mbdf);
>      if ( !iommu )
> @@ -502,7 +502,7 @@ static void guest_iommu_process_command(unsigned long _d)
>          cmd_mfn = guest_iommu_get_table_mfn(d,
>                                              reg_to_u64(iommu->cmd_buffer.reg_base),
>                                              sizeof(cmd_entry_t), head);
> -        ASSERT(mfn_valid(cmd_mfn));
> +        ASSERT(mfn_valid(_mfn(cmd_mfn)));
>  
>          cmd_base = map_domain_page(_mfn(cmd_mfn));
>          cmd = cmd_base + head % entries_per_page;
> diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> index 94a25a4..b2428f8 100644
> --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
> +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> @@ -293,7 +293,7 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
>               * XXX Should we really map all non-RAM (above 4G)? Minimally
>               * a pfn_valid() check would seem desirable here.
>               */
> -            if ( mfn_valid(pfn) )
> +            if ( mfn_valid(_mfn(pfn)) )
>              {
>                  int ret = amd_iommu_map_page(d, pfn, pfn,
>                                               IOMMUF_readable|IOMMUF_writable);
> diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
> index 9484f3b..16ae6f6 100644
> --- a/xen/drivers/passthrough/vtd/dmar.c
> +++ b/xen/drivers/passthrough/vtd/dmar.c
> @@ -939,7 +939,7 @@ static int __init add_user_rmrr(void)
>  
>          do
>          {
> -            if ( !mfn_valid(base) )
> +            if ( !mfn_valid(_mfn(base)) )
>              {
>                  printk(XENLOG_ERR VTDPREFIX
>                         "Invalid pfn in RMRR range "ERMRRU_FMT"\n",
> diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
> index 974b537..8a89f34 100644
> --- a/xen/drivers/passthrough/vtd/x86/vtd.c
> +++ b/xen/drivers/passthrough/vtd/x86/vtd.c
> @@ -129,7 +129,7 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
>          unsigned long pfn = pdx_to_pfn(i);
>  
>          if ( pfn > (0xffffffffUL >> PAGE_SHIFT) ?
> -             (!mfn_valid(pfn) ||
> +             (!mfn_valid(_mfn(pfn)) ||
>                !page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL)) :
>               iommu_inclusive_mapping ?
>               page_is_ram_type(pfn, RAM_TYPE_UNUSABLE) :
> diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
> index 3de911c..60ccbf3 100644
> --- a/xen/include/asm-arm/mm.h
> +++ b/xen/include/asm-arm/mm.h
> @@ -128,7 +128,7 @@ extern vaddr_t xenheap_virt_start;
>  #else
>  #define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
>  #define is_xen_heap_mfn(mfn) \
> -    (mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(mfn)))
> +    (mfn_valid(_mfn(mfn)) && is_xen_heap_page(__mfn_to_page(mfn)))
>  #endif
>  
>  #define is_xen_fixed_mfn(mfn)                                   \
> @@ -194,7 +194,7 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
>  
>  /* XXX -- account for base */
>  #define mfn_valid(mfn)        ({                                            \
> -    unsigned long __m_f_n = (mfn);                                          \
> +    unsigned long __m_f_n = mfn_x(mfn);                                     \
>      likely(pfn_to_pdx(__m_f_n) >= frametable_base_pdx && __mfn_valid(__m_f_n)); \
>  })
>  
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index 0905a3f..0899523 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -292,7 +292,7 @@ static inline struct page_info *get_page_from_gfn(
>      if ( !p2m_is_any_ram(p2mt) )
>          return NULL;
>  
> -    if ( !mfn_valid(mfn) )
> +    if ( !mfn_valid(_mfn(mfn)) )
>          return NULL;
>      page = mfn_to_page(mfn);
>  
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 5b1c566..470d29d 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -465,7 +465,7 @@ static inline struct page_info *get_page_from_gfn(
>      if (t)
>          *t = p2m_ram_rw;
>      page = __mfn_to_page(gfn);
> -    return mfn_valid(gfn) && get_page(page, d) ? page : NULL;
> +    return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;
>  }
>  
>  
> diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
> index af7d3e8..46faffc 100644
> --- a/xen/include/asm-x86/page.h
> +++ b/xen/include/asm-x86/page.h
> @@ -252,7 +252,7 @@ void copy_page_sse2(void *, const void *);
>   * We define non-underscored wrappers for above conversion functions. These are
>   * overridden in various source files while underscored versions remain intact.
>   */
> -#define mfn_valid(mfn)      __mfn_valid(mfn)
> +#define mfn_valid(mfn)      __mfn_valid(mfn_x(mfn))
>  #define virt_to_mfn(va)     __virt_to_mfn(va)
>  #define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
>  #define virt_to_maddr(va)   __virt_to_maddr((unsigned long)(va))
> diff --git a/xen/include/xen/pdx.h b/xen/include/xen/pdx.h
> index c7c837e..4c56645 100644
> --- a/xen/include/xen/pdx.h
> +++ b/xen/include/xen/pdx.h
> @@ -21,7 +21,7 @@ extern void set_pdx_range(unsigned long smfn, unsigned long emfn);
>  #define page_to_pdx(pg)  ((pg) - frame_table)
>  #define pdx_to_page(pdx) (frame_table + (pdx))
>  
> -extern int __mfn_valid(unsigned long mfn);
> +bool __mfn_valid(unsigned long mfn);
>  
>  static inline unsigned long pfn_to_pdx(unsigned long pfn)
>  {
> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
> index 2b205bf..a6cab00 100644
> --- a/xen/include/xen/tmem_xen.h
> +++ b/xen/include/xen/tmem_xen.h
> @@ -25,7 +25,7 @@
>  typedef uint32_t pagesize_t;  /* like size_t, must handle largest PAGE_SIZE */
>  
>  #define IS_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
> -#define IS_VALID_PAGE(_pi)  ( mfn_valid(page_to_mfn(_pi)) )
> +#define IS_VALID_PAGE(_pi)    mfn_valid(_mfn(page_to_mfn(_pi)))
>  
>  extern struct page_list_head tmem_page_list;
>  extern spinlock_t tmem_page_list_lock;
> 


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
