
Re: [Xen-devel] [PATCH v5 06/17] xen: Use a typesafe to define INVALID_MFN



On Tue, 28 Jun 2016, Julien Grall wrote:
> Also take the opportunity to convert arch/x86/debug.c to the typesafe
> mfn.
> 
> Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
> 
> ---
> Cc: Christoph Egger <chegger@xxxxxxxxx>
> Cc: Liu Jinsong <jinsong.liu@xxxxxxxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
> Cc: Paul Durrant <paul.durrant@xxxxxxxxxx>
> Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Tim Deegan <tim@xxxxxxx>
> 
>     Changes in v5:
>         - Patch added
> ---
>  xen/arch/arm/p2m.c              |  4 ++--

For the ARM bits:

Acked-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
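
As an aside for anyone following the conversion, the pattern this series
relies on can be summarised with a small standalone sketch. This only
mimics Xen's TYPE_SAFE() machinery (the wrapper struct, helpers and main()
below are illustrative, not the real implementation): once INVALID_MFN is
an mfn_t rather than a raw ~0UL, call sites either compare with mfn_eq()
or unwrap with mfn_x(), which is exactly what the hunks below do.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for TYPE_SAFE(unsigned long, mfn): the struct wrapper stops
     * an mfn from being silently mixed up with plain unsigned longs. */
    typedef struct { unsigned long val; } mfn_t;

    static inline mfn_t _mfn(unsigned long v) { return (mfn_t){ v }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.val; }
    static inline bool mfn_eq(mfn_t a, mfn_t b) { return a.val == b.val; }

    /* After this patch INVALID_MFN is an mfn_t value, not a bare ~0UL. */
    #define INVALID_MFN _mfn(~0UL)

    int main(void)
    {
        mfn_t mfn = _mfn(0x1234);

        /* "mfn == INVALID_MFN" no longer compiles; compare explicitly. */
        if ( !mfn_eq(mfn, INVALID_MFN) )
            printf("mfn %#lx is valid\n", mfn_x(mfn));

        /* Fields still typed unsigned long keep raw values via mfn_x(). */
        printf("raw INVALID_MFN = %#lx\n", mfn_x(INVALID_MFN));

        return 0;
    }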



>  xen/arch/x86/cpu/mcheck/mce.c   |  2 +-
>  xen/arch/x86/debug.c            | 50 ++++++++++++++++++++-------------------
>  xen/arch/x86/hvm/hvm.c          |  6 ++---
>  xen/arch/x86/hvm/viridian.c     |  6 ++---
>  xen/arch/x86/hvm/vmx/vmx.c      |  2 +-
>  xen/arch/x86/mm/guest_walk.c    |  4 ++--
>  xen/arch/x86/mm/hap/hap.c       |  4 ++--
>  xen/arch/x86/mm/p2m-ept.c       |  6 ++---
>  xen/arch/x86/mm/p2m-pod.c       | 18 +++++++-------
>  xen/arch/x86/mm/p2m-pt.c        | 18 +++++++-------
>  xen/arch/x86/mm/p2m.c           | 52 ++++++++++++++++++++---------------------
>  xen/arch/x86/mm/paging.c        | 12 +++++-----
>  xen/arch/x86/mm/shadow/common.c | 44 +++++++++++++++++-----------------
>  xen/arch/x86/mm/shadow/multi.c  | 36 ++++++++++++++--------------
>  xen/common/domain.c             |  6 ++---
>  xen/common/grant_table.c        |  6 ++---
>  xen/include/xen/mm.h            |  2 +-
>  18 files changed, 140 insertions(+), 138 deletions(-)
> 
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 34563bb..d690602 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -1461,7 +1461,7 @@ int relinquish_p2m_mapping(struct domain *d)
>      return apply_p2m_changes(d, RELINQUISH,
>                                pfn_to_paddr(p2m->lowest_mapped_gfn),
>                                pfn_to_paddr(p2m->max_mapped_gfn),
> -                              pfn_to_paddr(INVALID_MFN),
> +                              pfn_to_paddr(mfn_x(INVALID_MFN)),
>                                MATTR_MEM, 0, p2m_invalid,
>                                d->arch.p2m.default_access);
>  }
> @@ -1476,7 +1476,7 @@ int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
>      return apply_p2m_changes(d, CACHEFLUSH,
>                               pfn_to_paddr(start_mfn),
>                               pfn_to_paddr(end_mfn),
> -                             pfn_to_paddr(INVALID_MFN),
> +                             pfn_to_paddr(mfn_x(INVALID_MFN)),
>                               MATTR_MEM, 0, p2m_invalid,
>                               d->arch.p2m.default_access);
>  }
> diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
> index edcbe48..2695b0c 100644
> --- a/xen/arch/x86/cpu/mcheck/mce.c
> +++ b/xen/arch/x86/cpu/mcheck/mce.c
> @@ -1455,7 +1455,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc)
>                  gfn = PFN_DOWN(gaddr);
>                  mfn = mfn_x(get_gfn(d, gfn, &t));
>  
> -                if ( mfn == INVALID_MFN )
> +                if ( mfn == mfn_x(INVALID_MFN) )
>                  {
>                      put_gfn(d, gfn);
>                      put_domain(d);
> diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
> index 58cae22..3479f7c 100644
> --- a/xen/arch/x86/debug.c
> +++ b/xen/arch/x86/debug.c
> @@ -43,11 +43,11 @@ typedef unsigned long dbgva_t;
>  typedef unsigned char dbgbyte_t;
>  
>  /* Returns: mfn for the given (hvm guest) vaddr */
> -static unsigned long 
> +static mfn_t
>  dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr,
>                  unsigned long *gfn)
>  {
> -    unsigned long mfn;
> +    mfn_t mfn;
>      uint32_t pfec = PFEC_page_present;
>      p2m_type_t gfntype;
>  
> @@ -60,16 +60,17 @@ dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr,
>          return INVALID_MFN;
>      }
>  
> -    mfn = mfn_x(get_gfn(dp, *gfn, &gfntype)); 
> +    mfn = get_gfn(dp, *gfn, &gfntype);
>      if ( p2m_is_readonly(gfntype) && toaddr )
>      {
>          DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
>          mfn = INVALID_MFN;
>      }
>      else
> -        DBGP2("X: vaddr:%lx domid:%d mfn:%lx\n", vaddr, dp->domain_id, mfn);
> +        DBGP2("X: vaddr:%lx domid:%d mfn:%lx\n",
> +              vaddr, dp->domain_id, mfn_x(mfn));
>  
> -    if ( mfn == INVALID_MFN )
> +    if ( mfn_eq(mfn, INVALID_MFN) )
>      {
>          put_gfn(dp, *gfn);
>          *gfn = INVALID_GFN;
> @@ -91,7 +92,7 @@ dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr,
>   *       mode.
>   * Returns: mfn for the given (pv guest) vaddr 
>   */
> -static unsigned long 
> +static mfn_t
>  dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
>  {
>      l4_pgentry_t l4e, *l4t;
> @@ -99,31 +100,31 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
>      l2_pgentry_t l2e, *l2t;
>      l1_pgentry_t l1e, *l1t;
>      unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
> -    unsigned long mfn = cr3 >> PAGE_SHIFT;
> +    mfn_t mfn = _mfn(cr3 >> PAGE_SHIFT);
>  
>      DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id, 
>            cr3, pgd3val);
>  
>      if ( pgd3val == 0 )
>      {
> -        l4t = map_domain_page(_mfn(mfn));
> +        l4t = map_domain_page(mfn);
>          l4e = l4t[l4_table_offset(vaddr)];
>          unmap_domain_page(l4t);
> -        mfn = l4e_get_pfn(l4e);
> +        mfn = _mfn(l4e_get_pfn(l4e));
>          DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%lx\n", l4t, 
> -              l4_table_offset(vaddr), l4e, mfn);
> +              l4_table_offset(vaddr), l4e, mfn_x(mfn));
>          if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
>          {
>              DBGP1("l4 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
>              return INVALID_MFN;
>          }
>  
> -        l3t = map_domain_page(_mfn(mfn));
> +        l3t = map_domain_page(mfn);
>          l3e = l3t[l3_table_offset(vaddr)];
>          unmap_domain_page(l3t);
> -        mfn = l3e_get_pfn(l3e);
> +        mfn = _mfn(l3e_get_pfn(l3e));
>          DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%lx\n", l3t, 
> -              l3_table_offset(vaddr), l3e, mfn);
> +              l3_table_offset(vaddr), l3e, mfn_x(mfn));
>          if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
>               (l3e_get_flags(l3e) & _PAGE_PSE) )
>          {
> @@ -132,26 +133,26 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
>          }
>      }
>  
> -    l2t = map_domain_page(_mfn(mfn));
> +    l2t = map_domain_page(mfn);
>      l2e = l2t[l2_table_offset(vaddr)];
>      unmap_domain_page(l2t);
> -    mfn = l2e_get_pfn(l2e);
> +    mfn = _mfn(l2e_get_pfn(l2e));
>      DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%lx\n", l2t, l2_table_offset(vaddr),
> -          l2e, mfn);
> +          l2e, mfn_x(mfn));
>      if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
>           (l2e_get_flags(l2e) & _PAGE_PSE) )
>      {
>          DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
>          return INVALID_MFN;
>      }
> -    l1t = map_domain_page(_mfn(mfn));
> +    l1t = map_domain_page(mfn);
>      l1e = l1t[l1_table_offset(vaddr)];
>      unmap_domain_page(l1t);
> -    mfn = l1e_get_pfn(l1e);
> +    mfn = _mfn(l1e_get_pfn(l1e));
>      DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%lx\n", l1t, l1_table_offset(vaddr),
> -          l1e, mfn);
> +          l1e, mfn_x(mfn));
>  
> -    return mfn_valid(mfn) ? mfn : INVALID_MFN;
> +    return mfn_valid(mfn_x(mfn)) ? mfn : INVALID_MFN;
>  }
>  
>  /* Returns: number of bytes remaining to be copied */
> @@ -163,23 +164,24 @@ unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
>      {
>          char *va;
>          unsigned long addr = (unsigned long)gaddr;
> -        unsigned long mfn, gfn = INVALID_GFN, pagecnt;
> +        mfn_t mfn;
> +        unsigned long gfn = INVALID_GFN, pagecnt;
>  
>          pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);
>  
>          mfn = (has_hvm_container_domain(dp)
>                 ? dbg_hvm_va2mfn(addr, dp, toaddr, &gfn)
>                 : dbg_pv_va2mfn(addr, dp, pgd3));
> -        if ( mfn == INVALID_MFN ) 
> +        if ( mfn_eq(mfn, INVALID_MFN) )
>              break;
>  
> -        va = map_domain_page(_mfn(mfn));
> +        va = map_domain_page(mfn);
>          va = va + (addr & (PAGE_SIZE-1));
>  
>          if ( toaddr )
>          {
>              copy_from_user(va, buf, pagecnt);    /* va = buf */
> -            paging_mark_dirty(dp, mfn);
> +            paging_mark_dirty(dp, mfn_x(mfn));
>          }
>          else
>          {
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index c89ab6e..a929e3b 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -1796,7 +1796,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>          p2m = hostp2m;
>  
>      /* Check access permissions first, then handle faults */
> -    if ( mfn_x(mfn) != INVALID_MFN )
> +    if ( !mfn_eq(mfn, INVALID_MFN) )
>      {
>          bool_t violation;
>  
> @@ -5299,8 +5299,8 @@ static int do_altp2m_op(
>              rc = -EINVAL;
>  
>          if ( (gfn_x(vcpu_altp2m(curr).veinfo_gfn) != INVALID_GFN) ||
> -             (mfn_x(get_gfn_query_unlocked(curr->domain,
> -                    a.u.enable_notify.gfn, &p2mt)) == INVALID_MFN) )
> +             (mfn_eq(get_gfn_query_unlocked(curr->domain,
> +                     a.u.enable_notify.gfn, &p2mt), INVALID_MFN)) )
>              return -EINVAL;
>  
>          vcpu_altp2m(curr).veinfo_gfn = _gfn(a.u.enable_notify.gfn);
> diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
> index 8253fd0..575acdd 100644
> --- a/xen/arch/x86/hvm/viridian.c
> +++ b/xen/arch/x86/hvm/viridian.c
> @@ -196,7 +196,7 @@ static void enable_hypercall_page(struct domain *d)
>          if ( page )
>              put_page(page);
>          gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
> -                 page ? page_to_mfn(page) : INVALID_MFN);
> +                 page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
>          return;
>      }
>  
> @@ -269,7 +269,7 @@ static void initialize_apic_assist(struct vcpu *v)
>  
>   fail:
>      gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
> -             page ? page_to_mfn(page) : INVALID_MFN);
> +             page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
>  }
>  
>  static void teardown_apic_assist(struct vcpu *v)
> @@ -349,7 +349,7 @@ static void update_reference_tsc(struct domain *d, bool_t initialize)
>          if ( page )
>              put_page(page);
>          gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
> -                 page ? page_to_mfn(page) : INVALID_MFN);
> +                 page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
>          return;
>      }
>  
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 9d4121e..888a6b1 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2025,7 +2025,7 @@ static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
>  
>              mfn = get_gfn_query_unlocked(d, gfn_x(vcpu_altp2m(v).veinfo_gfn), &t);
>  
> -            if ( mfn_x(mfn) != INVALID_MFN )
> +            if ( !mfn_eq(mfn, INVALID_MFN) )
>                  __vmwrite(VIRT_EXCEPTION_INFO, mfn_x(mfn) << PAGE_SHIFT);
>              else
>                  v->arch.hvm_vmx.secondary_exec_control &=
> diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
> index e850502..868e909 100644
> --- a/xen/arch/x86/mm/guest_walk.c
> +++ b/xen/arch/x86/mm/guest_walk.c
> @@ -281,7 +281,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
>          start = _gfn((gfn_x(start) & ~GUEST_L3_GFN_MASK) +
>                       ((va >> PAGE_SHIFT) & GUEST_L3_GFN_MASK));
>          gw->l1e = guest_l1e_from_gfn(start, flags);
> -        gw->l2mfn = gw->l1mfn = _mfn(INVALID_MFN);
> +        gw->l2mfn = gw->l1mfn = INVALID_MFN;
>          goto set_ad;
>      }
>  
> @@ -356,7 +356,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
>          start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) +
>                       guest_l1_table_offset(va));
>          gw->l1e = guest_l1e_from_gfn(start, flags);
> -        gw->l1mfn = _mfn(INVALID_MFN);
> +        gw->l1mfn = INVALID_MFN;
>      } 
>      else 
>      {
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index 9c2cd49..3218fa2 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -430,7 +430,7 @@ static mfn_t hap_make_monitor_table(struct vcpu *v)
>   oom:
>      HAP_ERROR("out of memory building monitor pagetable\n");
>      domain_crash(d);
> -    return _mfn(INVALID_MFN);
> +    return INVALID_MFN;
>  }
>  
>  static void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn)
> @@ -509,7 +509,7 @@ int hap_enable(struct domain *d, u32 mode)
>          }
>  
>          for ( i = 0; i < MAX_EPTP; i++ )
> -            d->arch.altp2m_eptp[i] = INVALID_MFN;
> +            d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
>  
>          for ( i = 0; i < MAX_ALTP2M; i++ )
>          {
> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index 7166c71..6d03736 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -50,7 +50,7 @@ static int atomic_write_ept_entry(ept_entry_t *entryptr, ept_entry_t new,
>                                    int level)
>  {
>      int rc;
> -    unsigned long oldmfn = INVALID_MFN;
> +    unsigned long oldmfn = mfn_x(INVALID_MFN);
>      bool_t check_foreign = (new.mfn != entryptr->mfn ||
>                              new.sa_p2mt != entryptr->sa_p2mt);
>  
> @@ -91,7 +91,7 @@ static int atomic_write_ept_entry(ept_entry_t *entryptr, ept_entry_t new,
>  
>      write_atomic(&entryptr->epte, new.epte);
>  
> -    if ( unlikely(oldmfn != INVALID_MFN) )
> +    if ( unlikely(oldmfn != mfn_x(INVALID_MFN)) )
>          put_page(mfn_to_page(oldmfn));
>  
>      rc = 0;
> @@ -887,7 +887,7 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m,
>      int i;
>      int ret = 0;
>      bool_t recalc = 0;
> -    mfn_t mfn = _mfn(INVALID_MFN);
> +    mfn_t mfn = INVALID_MFN;
>      struct ept_data *ept = &p2m->ept;
>  
>      *t = p2m_mmio_dm;
> diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
> index b7ab169..f384589 100644
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -559,7 +559,7 @@ p2m_pod_decrease_reservation(struct domain *d,
>      {
>          /* All PoD: Mark the whole region invalid and tell caller
>           * we're done. */
> -        p2m_set_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid,
> +        p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
>                        p2m->default_access);
>          p2m->pod.entry_count-=(1<<order);
>          BUG_ON(p2m->pod.entry_count < 0);
> @@ -602,7 +602,7 @@ p2m_pod_decrease_reservation(struct domain *d,
>          n = 1UL << cur_order;
>          if ( t == p2m_populate_on_demand )
>          {
> -            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
> +            p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
>                            p2m_invalid, p2m->default_access);
>              p2m->pod.entry_count -= n;
>              BUG_ON(p2m->pod.entry_count < 0);
> @@ -624,7 +624,7 @@ p2m_pod_decrease_reservation(struct domain *d,
>  
>              page = mfn_to_page(mfn);
>  
> -            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
> +            p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
>                            p2m_invalid, p2m->default_access);
>              p2m_tlb_flush_sync(p2m);
>              for ( j = 0; j < n; ++j )
> @@ -671,7 +671,7 @@ void p2m_pod_dump_data(struct domain *d)
>  static int
>  p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
>  {
> -    mfn_t mfn, mfn0 = _mfn(INVALID_MFN);
> +    mfn_t mfn, mfn0 = INVALID_MFN;
>      p2m_type_t type, type0 = 0;
>      unsigned long * map = NULL;
>      int ret=0, reset = 0;
> @@ -754,7 +754,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
>      }
>  
>      /* Try to remove the page, restoring old mapping if it fails. */
> -    p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
> +    p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
>                    p2m_populate_on_demand, p2m->default_access);
>      p2m_tlb_flush_sync(p2m);
>  
> @@ -871,7 +871,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
>          }
>  
>          /* Try to remove the page, restoring old mapping if it fails. */
> -        p2m_set_entry(p2m, gfns[i], _mfn(INVALID_MFN), PAGE_ORDER_4K,
> +        p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
>                        p2m_populate_on_demand, p2m->default_access);
>  
>          /* See if the page was successfully unmapped.  (Allow one refcount
> @@ -1073,7 +1073,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
>           * NOTE: In a fine-grained p2m locking scenario this operation
>           * may need to promote its locking from gfn->1g superpage
>           */
> -        p2m_set_entry(p2m, gfn_aligned, _mfn(INVALID_MFN), PAGE_ORDER_2M,
> +        p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
>                        p2m_populate_on_demand, p2m->default_access);
>          return 0;
>      }
> @@ -1157,7 +1157,7 @@ remap_and_retry:
>       * need promoting the gfn lock from gfn->2M superpage */
>      gfn_aligned = (gfn>>order)<<order;
>      for(i=0; i<(1<<order); i++)
> -        p2m_set_entry(p2m, gfn_aligned + i, _mfn(INVALID_MFN), PAGE_ORDER_4K,
> +        p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
>                        p2m_populate_on_demand, p2m->default_access);
>      if ( tb_init_done )
>      {
> @@ -1215,7 +1215,7 @@ guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
>      }
>  
>      /* Now, actually do the two-way mapping */
> -    rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), order,
> +    rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order,
>                         p2m_populate_on_demand, p2m->default_access);
>      if ( rc == 0 )
>      {
> diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
> index 4980934..2b6e89e 100644
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -511,7 +511,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
>       * the intermediate one might be).
>       */
>      unsigned int flags, iommu_old_flags = 0;
> -    unsigned long old_mfn = INVALID_MFN;
> +    unsigned long old_mfn = mfn_x(INVALID_MFN);
>  
>      ASSERT(sve != 0);
>  
> @@ -764,7 +764,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
>                       p2m->max_mapped_pfn )
>                      break;
>          }
> -        return _mfn(INVALID_MFN);
> +        return INVALID_MFN;
>      }
>  
>      mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
> @@ -777,7 +777,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
>          if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
>          {
>              unmap_domain_page(l4e);
> -            return _mfn(INVALID_MFN);
> +            return INVALID_MFN;
>          }
>          mfn = _mfn(l4e_get_pfn(*l4e));
>          recalc = needs_recalc(l4, *l4e);
> @@ -805,7 +805,7 @@ pod_retry_l3:
>                      *t = p2m_populate_on_demand;
>              }
>              unmap_domain_page(l3e);
> -            return _mfn(INVALID_MFN);
> +            return INVALID_MFN;
>          }
>          if ( flags & _PAGE_PSE )
>          {
> @@ -817,7 +817,7 @@ pod_retry_l3:
>              unmap_domain_page(l3e);
>  
>              ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
> -            return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
> +            return (p2m_is_valid(*t)) ? mfn : INVALID_MFN;
>          }
>  
>          mfn = _mfn(l3e_get_pfn(*l3e));
> @@ -846,7 +846,7 @@ pod_retry_l2:
>          }
>      
>          unmap_domain_page(l2e);
> -        return _mfn(INVALID_MFN);
> +        return INVALID_MFN;
>      }
>      if ( flags & _PAGE_PSE )
>      {
> @@ -856,7 +856,7 @@ pod_retry_l2:
>          unmap_domain_page(l2e);
>          
>          ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
> -        return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
> +        return (p2m_is_valid(*t)) ? mfn : INVALID_MFN;
>      }
>  
>      mfn = _mfn(l2e_get_pfn(*l2e));
> @@ -885,14 +885,14 @@ pod_retry_l1:
>          }
>      
>          unmap_domain_page(l1e);
> -        return _mfn(INVALID_MFN);
> +        return INVALID_MFN;
>      }
>      mfn = _mfn(l1e_get_pfn(*l1e));
>      *t = recalc_type(recalc || _needs_recalc(flags), l1t, p2m, gfn);
>      unmap_domain_page(l1e);
>  
>      ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t) || p2m_is_paging(*t));
> -    return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : _mfn(INVALID_MFN);
> +    return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : INVALID_MFN;
>  }
>  
>  static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m,
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 6258a5b..6f90510 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -388,7 +388,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
>      if (unlikely((p2m_is_broken(*t))))
>      {
>          /* Return invalid_mfn to avoid caller's access */
> -        mfn = _mfn(INVALID_MFN);
> +        mfn = INVALID_MFN;
>          if ( q & P2M_ALLOC )
>              domain_crash(p2m->domain);
>      }
> @@ -493,7 +493,7 @@ int p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
>              rc = set_rc;
>  
>          gfn += 1ul << order;
> -        if ( mfn_x(mfn) != INVALID_MFN )
> +        if ( !mfn_eq(mfn, INVALID_MFN) )
>              mfn = _mfn(mfn_x(mfn) + (1ul << order));
>          todo -= 1ul << order;
>      }
> @@ -580,7 +580,7 @@ int p2m_alloc_table(struct p2m_domain *p2m)
>  
>      /* Initialise physmap tables for slot zero. Other code assumes this. */
>      p2m->defer_nested_flush = 1;
> -    rc = p2m_set_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K,
> +    rc = p2m_set_entry(p2m, 0, INVALID_MFN, PAGE_ORDER_4K,
>                         p2m_invalid, p2m->default_access);
>      p2m->defer_nested_flush = 0;
>      p2m_unlock(p2m);
> @@ -670,7 +670,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
>              ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
>          }
>      }
> -    return p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid,
> +    return p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid,
>                           p2m->default_access);
>  }
>  
> @@ -840,7 +840,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
>      {
>          gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
>                   gfn_x(gfn), mfn_x(mfn));
> -        rc = p2m_set_entry(p2m, gfn_x(gfn), _mfn(INVALID_MFN), page_order,
> +        rc = p2m_set_entry(p2m, gfn_x(gfn), INVALID_MFN, page_order,
>                             p2m_invalid, p2m->default_access);
>          if ( rc == 0 )
>          {
> @@ -1107,7 +1107,7 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
>      }
>  
>      /* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
> -    if ( (INVALID_MFN == mfn_x(actual_mfn)) || (t != p2m_mmio_direct) )
> +    if ( (mfn_eq(actual_mfn, INVALID_MFN)) || (t != p2m_mmio_direct) )
>      {
>          gdprintk(XENLOG_ERR,
>                   "gfn_to_mfn failed! gfn=%08lx type:%d\n", gfn, t);
> @@ -1117,7 +1117,7 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
>          gdprintk(XENLOG_WARNING,
>                   "no mapping between mfn %08lx and gfn %08lx\n",
>                   mfn_x(mfn), gfn);
> -    rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), order, p2m_invalid,
> +    rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid,
>                         p2m->default_access);
>  
>   out:
> @@ -1146,7 +1146,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn)
>      mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
>      if ( p2mt == p2m_mmio_direct && mfn_x(mfn) == gfn )
>      {
> -        ret = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K,
> +        ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,
>                              p2m_invalid, p2m->default_access);
>          gfn_unlock(p2m, gfn, 0);
>      }
> @@ -1316,7 +1316,7 @@ int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
>          put_page(page);
>  
>      /* Remove mapping from p2m table */
> -    ret = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K,
> +    ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,
>                          p2m_ram_paged, a);
>  
>      /* Clear content before returning the page to Xen */
> @@ -1844,7 +1844,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
>      if ( altp2m_idx )
>      {
>          if ( altp2m_idx >= MAX_ALTP2M ||
> -             d->arch.altp2m_eptp[altp2m_idx] == INVALID_MFN )
> +             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
>              return -EINVAL;
>  
>          ap2m = d->arch.altp2m_p2m[altp2m_idx];
> @@ -1942,7 +1942,7 @@ int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access)
>      mfn = p2m->get_entry(p2m, gfn_x(gfn), &t, &a, 0, NULL, NULL);
>      gfn_unlock(p2m, gfn, 0);
>  
> -    if ( mfn_x(mfn) == INVALID_MFN )
> +    if ( mfn_eq(mfn, INVALID_MFN) )
>          return -ESRCH;
>      
>      if ( (unsigned) a >= ARRAY_SIZE(memaccess) )
> @@ -2288,7 +2288,7 @@ unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
>  
>      for ( i = 0; i < MAX_ALTP2M; i++ )
>      {
> -        if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
> +        if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
>              continue;
>  
>          p2m = d->arch.altp2m_p2m[i];
> @@ -2315,7 +2315,7 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
>  
>      altp2m_list_lock(d);
>  
> -    if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
> +    if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
>      {
>          if ( idx != vcpu_altp2m(v).p2midx )
>          {
> @@ -2359,14 +2359,14 @@ bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
>                                0, &page_order);
>      __put_gfn(*ap2m, gfn_x(gfn));
>  
> -    if ( mfn_x(mfn) != INVALID_MFN )
> +    if ( !mfn_eq(mfn, INVALID_MFN) )
>          return 0;
>  
>      mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
>                                P2M_ALLOC | P2M_UNSHARE, &page_order);
>      __put_gfn(hp2m, gfn_x(gfn));
>  
> -    if ( mfn_x(mfn) == INVALID_MFN )
> +    if ( mfn_eq(mfn, INVALID_MFN) )
>          return 0;
>  
>      p2m_lock(*ap2m);
> @@ -2404,7 +2404,7 @@ void p2m_flush_altp2m(struct domain *d)
>          /* Uninit and reinit ept to force TLB shootdown */
>          ept_p2m_uninit(d->arch.altp2m_p2m[i]);
>          ept_p2m_init(d->arch.altp2m_p2m[i]);
> -        d->arch.altp2m_eptp[i] = INVALID_MFN;
> +        d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
>      }
>  
>      altp2m_list_unlock(d);
> @@ -2431,7 +2431,7 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
>  
>      altp2m_list_lock(d);
>  
> -    if ( d->arch.altp2m_eptp[idx] == INVALID_MFN )
> +    if ( d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
>      {
>          p2m_init_altp2m_helper(d, idx);
>          rc = 0;
> @@ -2450,7 +2450,7 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
>  
>      for ( i = 0; i < MAX_ALTP2M; i++ )
>      {
> -        if ( d->arch.altp2m_eptp[i] != INVALID_MFN )
> +        if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
>              continue;
>  
>          p2m_init_altp2m_helper(d, i);
> @@ -2476,7 +2476,7 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
>  
>      altp2m_list_lock(d);
>  
> -    if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
> +    if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
>      {
>          p2m = d->arch.altp2m_p2m[idx];
>  
> @@ -2486,7 +2486,7 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
>              /* Uninit and reinit ept to force TLB shootdown */
>              ept_p2m_uninit(d->arch.altp2m_p2m[idx]);
>              ept_p2m_init(d->arch.altp2m_p2m[idx]);
> -            d->arch.altp2m_eptp[idx] = INVALID_MFN;
> +            d->arch.altp2m_eptp[idx] = mfn_x(INVALID_MFN);
>              rc = 0;
>          }
>      }
> @@ -2510,7 +2510,7 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx)
>  
>      altp2m_list_lock(d);
>  
> -    if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
> +    if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
>      {
>          for_each_vcpu( d, v )
>              if ( idx != vcpu_altp2m(v).p2midx )
> @@ -2541,7 +2541,7 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
>      unsigned int page_order;
>      int rc = -EINVAL;
>  
> -    if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == INVALID_MFN )
> +    if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
>          return rc;
>  
>      hp2m = p2m_get_hostp2m(d);
> @@ -2636,14 +2636,14 @@ void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
>  
>      for ( i = 0; i < MAX_ALTP2M; i++ )
>      {
> -        if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
> +        if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
>              continue;
>  
>          p2m = d->arch.altp2m_p2m[i];
>          m = get_gfn_type_access(p2m, gfn_x(gfn), &t, &a, 0, NULL);
>  
>          /* Check for a dropped page that may impact this altp2m */
> -        if ( mfn_x(mfn) == INVALID_MFN &&
> +        if ( mfn_eq(mfn, INVALID_MFN) &&
>               gfn_x(gfn) >= p2m->min_remapped_gfn &&
>               gfn_x(gfn) <= p2m->max_remapped_gfn )
>          {
> @@ -2660,7 +2660,7 @@ void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
>                  for ( i = 0; i < MAX_ALTP2M; i++ )
>                  {
>                      if ( i == last_reset_idx ||
> -                         d->arch.altp2m_eptp[i] == INVALID_MFN )
> +                         d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
>                          continue;
>  
>                      p2m = d->arch.altp2m_p2m[i];
> @@ -2672,7 +2672,7 @@ void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
>                  goto out;
>              }
>          }
> -        else if ( mfn_x(m) != INVALID_MFN )
> +        else if ( !mfn_eq(m, INVALID_MFN) )
>              p2m_set_entry(p2m, gfn_x(gfn), mfn, page_order, p2mt, p2ma);
>  
>          __put_gfn(p2m, gfn_x(gfn));
> diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
> index 8219bb6..107fc8c 100644
> --- a/xen/arch/x86/mm/paging.c
> +++ b/xen/arch/x86/mm/paging.c
> @@ -67,7 +67,7 @@ static mfn_t paging_new_log_dirty_page(struct domain *d)
>      if ( unlikely(page == NULL) )
>      {
>          d->arch.paging.log_dirty.failed_allocs++;
> -        return _mfn(INVALID_MFN);
> +        return INVALID_MFN;
>      }
>  
>      d->arch.paging.log_dirty.allocs++;
> @@ -95,7 +95,7 @@ static mfn_t paging_new_log_dirty_node(struct domain *d)
>          int i;
>          mfn_t *node = map_domain_page(mfn);
>          for ( i = 0; i < LOGDIRTY_NODE_ENTRIES; i++ )
> -            node[i] = _mfn(INVALID_MFN);
> +            node[i] = INVALID_MFN;
>          unmap_domain_page(node);
>      }
>      return mfn;
> @@ -167,7 +167,7 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
>  
>              unmap_domain_page(l2);
>              paging_free_log_dirty_page(d, l3[i3]);
> -            l3[i3] = _mfn(INVALID_MFN);
> +            l3[i3] = INVALID_MFN;
>  
>              if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
>              {
> @@ -182,7 +182,7 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
>          if ( rc )
>              break;
>          paging_free_log_dirty_page(d, l4[i4]);
> -        l4[i4] = _mfn(INVALID_MFN);
> +        l4[i4] = INVALID_MFN;
>  
>          if ( i4 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
>          {
> @@ -198,7 +198,7 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
>      if ( !rc )
>      {
>          paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
> -        d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
> +        d->arch.paging.log_dirty.top = INVALID_MFN;
>  
>          ASSERT(d->arch.paging.log_dirty.allocs == 0);
>          d->arch.paging.log_dirty.failed_allocs = 0;
> @@ -660,7 +660,7 @@ int paging_domain_init(struct domain *d, unsigned int domcr_flags)
>      /* This must be initialized separately from the rest of the
>       * log-dirty init code as that can be called more than once and we
>       * don't want to leak any active log-dirty bitmaps */
> -    d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
> +    d->arch.paging.log_dirty.top = INVALID_MFN;
>  
>      /*
>       * Shadow pagetables are the default, but we will use
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index 226e32d..c51b370 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -88,10 +88,10 @@ void shadow_vcpu_init(struct vcpu *v)
>  
>      for ( i = 0; i < SHADOW_OOS_PAGES; i++ )
>      {
> -        v->arch.paging.shadow.oos[i] = _mfn(INVALID_MFN);
> -        v->arch.paging.shadow.oos_snapshot[i] = _mfn(INVALID_MFN);
> +        v->arch.paging.shadow.oos[i] = INVALID_MFN;
> +        v->arch.paging.shadow.oos_snapshot[i] = INVALID_MFN;
>          for ( j = 0; j < SHADOW_OOS_FIXUPS; j++ )
> -            v->arch.paging.shadow.oos_fixup[i].smfn[j] = _mfn(INVALID_MFN);
> +            v->arch.paging.shadow.oos_fixup[i].smfn[j] = INVALID_MFN;
>      }
>  #endif
>  
> @@ -593,12 +593,12 @@ static inline int oos_fixup_flush_gmfn(struct vcpu *v, mfn_t gmfn,
>      int i;
>      for ( i = 0; i < SHADOW_OOS_FIXUPS; i++ )
>      {
> -        if ( mfn_x(fixup->smfn[i]) != INVALID_MFN )
> +        if ( !mfn_eq(fixup->smfn[i], INVALID_MFN) )
>          {
>              sh_remove_write_access_from_sl1p(d, gmfn,
>                                               fixup->smfn[i],
>                                               fixup->off[i]);
> -            fixup->smfn[i] = _mfn(INVALID_MFN);
> +            fixup->smfn[i] = INVALID_MFN;
>          }
>      }
>  
> @@ -636,7 +636,7 @@ void oos_fixup_add(struct domain *d, mfn_t gmfn,
>  
>              next = oos_fixup[idx].next;
>  
> -            if ( mfn_x(oos_fixup[idx].smfn[next]) != INVALID_MFN )
> +            if ( !mfn_eq(oos_fixup[idx].smfn[next], INVALID_MFN) )
>              {
>                  TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_OOS_FIXUP_EVICT);
>  
> @@ -757,7 +757,7 @@ static void oos_hash_add(struct vcpu *v, mfn_t gmfn)
>      struct oos_fixup fixup = { .next = 0 };
>  
>      for (i = 0; i < SHADOW_OOS_FIXUPS; i++ )
> -        fixup.smfn[i] = _mfn(INVALID_MFN);
> +        fixup.smfn[i] = INVALID_MFN;
>  
>      idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
>      oidx = idx;
> @@ -807,7 +807,7 @@ static void oos_hash_remove(struct domain *d, mfn_t gmfn)
>              idx = (idx + 1) % SHADOW_OOS_PAGES;
>          if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
>          {
> -            oos[idx] = _mfn(INVALID_MFN);
> +            oos[idx] = INVALID_MFN;
>              return;
>          }
>      }
> @@ -838,7 +838,7 @@ mfn_t oos_snapshot_lookup(struct domain *d, mfn_t gmfn)
>  
>      SHADOW_ERROR("gmfn %lx was OOS but not in hash table\n", mfn_x(gmfn));
>      BUG();
> -    return _mfn(INVALID_MFN);
> +    return INVALID_MFN;
>  }
>  
>  /* Pull a single guest page back into sync */
> @@ -862,7 +862,7 @@ void sh_resync(struct domain *d, mfn_t gmfn)
>          if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
>          {
>              _sh_resync(v, gmfn, &oos_fixup[idx], oos_snapshot[idx]);
> -            oos[idx] = _mfn(INVALID_MFN);
> +            oos[idx] = INVALID_MFN;
>              return;
>          }
>      }
> @@ -914,7 +914,7 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others)
>          {
>              /* Write-protect and sync contents */
>              _sh_resync(v, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
> -            oos[idx] = _mfn(INVALID_MFN);
> +            oos[idx] = INVALID_MFN;
>          }
>  
>   resync_others:
> @@ -948,7 +948,7 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others)
>              {
>                  /* Write-protect and sync contents */
>                  _sh_resync(other, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
> -                oos[idx] = _mfn(INVALID_MFN);
> +                oos[idx] = INVALID_MFN;
>              }
>          }
>      }
> @@ -1784,7 +1784,7 @@ void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
>      if ( likely(((vaddr + bytes - 1) & PAGE_MASK) == (vaddr & PAGE_MASK)) )
>      {
>          /* Whole write fits on a single page. */
> -        sh_ctxt->mfn[1] = _mfn(INVALID_MFN);
> +        sh_ctxt->mfn[1] = INVALID_MFN;
>          map = map_domain_page(sh_ctxt->mfn[0]) + (vaddr & ~PAGE_MASK);
>      }
>      else if ( !is_hvm_domain(d) )
> @@ -2086,7 +2086,7 @@ mfn_t shadow_hash_lookup(struct domain *d, unsigned long n, unsigned int t)
>      }
>  
>      perfc_incr(shadow_hash_lookup_miss);
> -    return _mfn(INVALID_MFN);
> +    return INVALID_MFN;
>  }
>  
>  void shadow_hash_insert(struct domain *d, unsigned long n, unsigned int t,
> @@ -2910,7 +2910,7 @@ void sh_reset_l3_up_pointers(struct vcpu *v)
>      };
>      static const unsigned int callback_mask = SHF_L3_64;
>  
> -    hash_vcpu_foreach(v, callback_mask, callbacks, _mfn(INVALID_MFN));
> +    hash_vcpu_foreach(v, callback_mask, callbacks, INVALID_MFN);
>  }
>  
>  
> @@ -2940,7 +2940,7 @@ static void sh_update_paging_modes(struct vcpu *v)
>  #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
>  
>  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
> -    if ( mfn_x(v->arch.paging.shadow.oos_snapshot[0]) == INVALID_MFN )
> +    if ( mfn_eq(v->arch.paging.shadow.oos_snapshot[0], INVALID_MFN) )
>      {
>          int i;
>          for(i = 0; i < SHADOW_OOS_PAGES; i++)
> @@ -3284,7 +3284,7 @@ void shadow_teardown(struct domain *d, int *preempted)
>                  if ( mfn_valid(oos_snapshot[i]) )
>                  {
>                      shadow_free(d, oos_snapshot[i]);
> -                    oos_snapshot[i] = _mfn(INVALID_MFN);
> +                    oos_snapshot[i] = INVALID_MFN;
>                  }
>          }
>  #endif /* OOS */
> @@ -3449,7 +3449,7 @@ static int shadow_one_bit_disable(struct domain *d, u32 mode)
>                      if ( mfn_valid(oos_snapshot[i]) )
>                      {
>                          shadow_free(d, oos_snapshot[i]);
> -                        oos_snapshot[i] = _mfn(INVALID_MFN);
> +                        oos_snapshot[i] = INVALID_MFN;
>                      }
>              }
>  #endif /* OOS */
> @@ -3744,7 +3744,7 @@ int shadow_track_dirty_vram(struct domain *d,
>          memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size);
>      else
>      {
> -        unsigned long map_mfn = INVALID_MFN;
> +        unsigned long map_mfn = mfn_x(INVALID_MFN);
>          void *map_sl1p = NULL;
>  
>          /* Iterate over VRAM to track dirty bits. */
> @@ -3754,7 +3754,7 @@ int shadow_track_dirty_vram(struct domain *d,
>              int dirty = 0;
>              paddr_t sl1ma = dirty_vram->sl1ma[i];
>  
> -            if (mfn_x(mfn) == INVALID_MFN)
> +            if ( mfn_eq(mfn, INVALID_MFN) )
>              {
>                  dirty = 1;
>              }
> @@ -3830,7 +3830,7 @@ int shadow_track_dirty_vram(struct domain *d,
>              for ( i = begin_pfn; i < end_pfn; i++ )
>              {
>                  mfn_t mfn = get_gfn_query_unlocked(d, i, &t);
> -                if ( mfn_x(mfn) != INVALID_MFN )
> +                if ( !mfn_eq(mfn, INVALID_MFN) )
>                      flush_tlb |= sh_remove_write_access(d, mfn, 1, 0);
>              }
>              dirty_vram->last_dirty = -1;
> @@ -3968,7 +3968,7 @@ void shadow_audit_tables(struct vcpu *v)
>          }
>      }
>  
> -    hash_vcpu_foreach(v, mask, callbacks, _mfn(INVALID_MFN));
> +    hash_vcpu_foreach(v, mask, callbacks, INVALID_MFN);
>  }
>  
>  #endif /* Shadow audit */
> diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
> index dfe59a2..f892e2f 100644
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -177,7 +177,7 @@ sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw,
>  {
>      return guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw, pfec,
>  #if GUEST_PAGING_LEVELS == 3 /* PAE */
> -                             _mfn(INVALID_MFN),
> +                             INVALID_MFN,
>                               v->arch.paging.shadow.gl3e
>  #else /* 32 or 64 */
>                               pagetable_get_mfn(v->arch.guest_table),
> @@ -336,32 +336,32 @@ static void sh_audit_gw(struct vcpu *v, walk_t *gw)
>      if ( mfn_valid(gw->l4mfn)
>           && mfn_valid((smfn = get_shadow_status(d, gw->l4mfn,
>                                                  SH_type_l4_shadow))) )
> -        (void) sh_audit_l4_table(v, smfn, _mfn(INVALID_MFN));
> +        (void) sh_audit_l4_table(v, smfn, INVALID_MFN);
>      if ( mfn_valid(gw->l3mfn)
>           && mfn_valid((smfn = get_shadow_status(d, gw->l3mfn,
>                                                  SH_type_l3_shadow))) )
> -        (void) sh_audit_l3_table(v, smfn, _mfn(INVALID_MFN));
> +        (void) sh_audit_l3_table(v, smfn, INVALID_MFN);
>  #endif /* PAE or 64... */
>      if ( mfn_valid(gw->l2mfn) )
>      {
>          if ( mfn_valid((smfn = get_shadow_status(d, gw->l2mfn,
>                                                   SH_type_l2_shadow))) )
> -            (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
> +            (void) sh_audit_l2_table(v, smfn, INVALID_MFN);
>  #if GUEST_PAGING_LEVELS == 3
>          if ( mfn_valid((smfn = get_shadow_status(d, gw->l2mfn,
>                                                   SH_type_l2h_shadow))) )
> -            (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
> +            (void) sh_audit_l2_table(v, smfn, INVALID_MFN);
>  #endif
>      }
>      if ( mfn_valid(gw->l1mfn)
>           && mfn_valid((smfn = get_shadow_status(d, gw->l1mfn,
>                                                  SH_type_l1_shadow))) )
> -        (void) sh_audit_l1_table(v, smfn, _mfn(INVALID_MFN));
> +        (void) sh_audit_l1_table(v, smfn, INVALID_MFN);
>      else if ( (guest_l2e_get_flags(gw->l2e) & _PAGE_PRESENT)
>                && (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)
>                && mfn_valid(
>                (smfn = get_fl1_shadow_status(d, guest_l2e_get_gfn(gw->l2e)))) )
> -        (void) sh_audit_fl1_table(v, smfn, _mfn(INVALID_MFN));
> +        (void) sh_audit_fl1_table(v, smfn, INVALID_MFN);
>  }
>  
>  #else
> @@ -1752,7 +1752,7 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
>  {
>  #if GUEST_PAGING_LEVELS >= 4 /* 64bit... */
>      struct domain *d = v->domain;
> -    mfn_t sl3mfn = _mfn(INVALID_MFN);
> +    mfn_t sl3mfn = INVALID_MFN;
>      shadow_l3e_t *sl3e;
>      if ( !mfn_valid(gw->l2mfn) ) return NULL; /* No guest page. */
>      /* Get the l3e */
> @@ -2158,7 +2158,7 @@ static int validate_gl4e(struct vcpu *v, void *new_ge, mfn_t sl4mfn, void *se)
>      shadow_l4e_t new_sl4e;
>      guest_l4e_t new_gl4e = *(guest_l4e_t *)new_ge;
>      shadow_l4e_t *sl4p = se;
> -    mfn_t sl3mfn = _mfn(INVALID_MFN);
> +    mfn_t sl3mfn = INVALID_MFN;
>      struct domain *d = v->domain;
>      p2m_type_t p2mt;
>      int result = 0;
> @@ -2217,7 +2217,7 @@ static int validate_gl3e(struct vcpu *v, void *new_ge, mfn_t sl3mfn, void *se)
>      shadow_l3e_t new_sl3e;
>      guest_l3e_t new_gl3e = *(guest_l3e_t *)new_ge;
>      shadow_l3e_t *sl3p = se;
> -    mfn_t sl2mfn = _mfn(INVALID_MFN);
> +    mfn_t sl2mfn = INVALID_MFN;
>      p2m_type_t p2mt;
>      int result = 0;
>  
> @@ -2250,7 +2250,7 @@ static int validate_gl2e(struct vcpu *v, void *new_ge, mfn_t sl2mfn, void *se)
>      shadow_l2e_t new_sl2e;
>      guest_l2e_t new_gl2e = *(guest_l2e_t *)new_ge;
>      shadow_l2e_t *sl2p = se;
> -    mfn_t sl1mfn = _mfn(INVALID_MFN);
> +    mfn_t sl1mfn = INVALID_MFN;
>      p2m_type_t p2mt;
>      int result = 0;
>  
> @@ -2608,7 +2608,7 @@ static inline void check_for_early_unshadow(struct vcpu *v, mfn_t gmfn)
>  static inline void reset_early_unshadow(struct vcpu *v)
>  {
>  #if SHADOW_OPTIMIZATIONS & SHOPT_EARLY_UNSHADOW
> -    v->arch.paging.shadow.last_emulated_mfn_for_unshadow = INVALID_MFN;
> +    v->arch.paging.shadow.last_emulated_mfn_for_unshadow = mfn_x(INVALID_MFN);
>  #endif
>  }
>  
> @@ -4105,10 +4105,10 @@ sh_update_cr3(struct vcpu *v, int do_locking)
>                                             ? SH_type_l2h_shadow
>                                             : SH_type_l2_shadow);
>                  else
> -                    sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0);
> +                    sh_set_toplevel_shadow(v, i, INVALID_MFN, 0);
>              }
>              else
> -                sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0);
> +                sh_set_toplevel_shadow(v, i, INVALID_MFN, 0);
>          }
>      }
>  #elif GUEST_PAGING_LEVELS == 4
> @@ -4531,7 +4531,7 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
>  
>          if ( fast_path ) {
>              if ( pagetable_is_null(v->arch.shadow_table[i]) )
> -                smfn = _mfn(INVALID_MFN);
> +                smfn = INVALID_MFN;
>              else
>                  smfn = _mfn(pagetable_get_pfn(v->arch.shadow_table[i]));
>          }
> @@ -4540,8 +4540,8 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
>              /* retrieving the l2s */
>              gmfn = get_gfn_query_unlocked(d, gfn_x(guest_l3e_get_gfn(gl3e[i])),
>                                            &p2mt);
> -            smfn = unlikely(mfn_x(gmfn) == INVALID_MFN)
> -                   ? _mfn(INVALID_MFN)
> +            smfn = unlikely(mfn_eq(gmfn, INVALID_MFN))
> +                   ? INVALID_MFN
>                    : shadow_hash_lookup(d, mfn_x(gmfn), SH_type_l2_pae_shadow);
>          }
>  
> @@ -4846,7 +4846,7 @@ int sh_audit_fl1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
>  {
>      guest_l1e_t *gl1e, e;
>      shadow_l1e_t *sl1e;
> -    mfn_t gl1mfn = _mfn(INVALID_MFN);
> +    mfn_t gl1mfn = INVALID_MFN;
>      int f;
>      int done = 0;
>  
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index 45273d4..42c07ee 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -117,7 +117,7 @@ static void vcpu_info_reset(struct vcpu *v)
>      v->vcpu_info = ((v->vcpu_id < XEN_LEGACY_MAX_VCPUS)
>                      ? (vcpu_info_t *)&shared_info(d, vcpu_info[v->vcpu_id])
>                      : &dummy_vcpu_info);
> -    v->vcpu_info_mfn = INVALID_MFN;
> +    v->vcpu_info_mfn = mfn_x(INVALID_MFN);
>  }
>  
>  struct vcpu *alloc_vcpu(
> @@ -1141,7 +1141,7 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
>      if ( offset > (PAGE_SIZE - sizeof(vcpu_info_t)) )
>          return -EINVAL;
>  
> -    if ( v->vcpu_info_mfn != INVALID_MFN )
> +    if ( v->vcpu_info_mfn != mfn_x(INVALID_MFN) )
>          return -EINVAL;
>  
>      /* Run this command on yourself or on other offline VCPUS. */
> @@ -1205,7 +1205,7 @@ void unmap_vcpu_info(struct vcpu *v)
>  {
>      unsigned long mfn;
>  
> -    if ( v->vcpu_info_mfn == INVALID_MFN )
> +    if ( v->vcpu_info_mfn == mfn_x(INVALID_MFN) )
>          return;
>  
>      mfn = v->vcpu_info_mfn;
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index 3f15543..ecace07 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -244,7 +244,7 @@ static int __get_paged_frame(unsigned long gfn, unsigned long *frame, struct pag
>                                (readonly) ? P2M_ALLOC : P2M_UNSHARE);
>      if ( !(*page) )
>      {
> -        *frame = INVALID_MFN;
> +        *frame = mfn_x(INVALID_MFN);
>          if ( p2m_is_shared(p2mt) )
>              return GNTST_eagain;
>          if ( p2m_is_paging(p2mt) )
> @@ -260,7 +260,7 @@ static int __get_paged_frame(unsigned long gfn, unsigned long *frame, struct pag
>      *page = mfn_valid(*frame) ? mfn_to_page(*frame) : NULL;
>      if ( (!(*page)) || (!get_page(*page, rd)) )
>      {
> -        *frame = INVALID_MFN;
> +        *frame = mfn_x(INVALID_MFN);
>          *page = NULL;
>          rc = GNTST_bad_page;
>      }
> @@ -1785,7 +1785,7 @@ gnttab_transfer(
>              p2m_type_t __p2mt;
>              mfn = mfn_x(get_gfn_unshare(d, gop.mfn, &__p2mt));
>              if ( p2m_is_shared(__p2mt) || !p2m_is_valid(__p2mt) )
> -                mfn = INVALID_MFN;
> +                mfn = mfn_x(INVALID_MFN);
>          }
>  #else
>          mfn = mfn_x(gfn_to_mfn(d, _gfn(gop.mfn)));
> diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
> index afbb1a1..7f207ec 100644
> --- a/xen/include/xen/mm.h
> +++ b/xen/include/xen/mm.h
> @@ -55,7 +55,7 @@
>  
>  TYPE_SAFE(unsigned long, mfn);
>  #define PRI_mfn          "05lx"
> -#define INVALID_MFN      (~0UL)
> +#define INVALID_MFN      _mfn(~0UL)
>  
>  #ifndef mfn_t
>  #define mfn_t /* Grep fodder: mfn_t, _mfn() and mfn_x() are defined above */
> -- 
> 1.9.1
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

