
[Xen-changelog] [xen staging] x86/mm: Use mfn_eq()/mfn_add() rather than opencoded variations



commit 3f56004f890ced1ddf7c861910ef8e7783764476
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Jun 1 12:56:09 2018 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Aug 28 18:42:17 2018 +0100

    x86/mm: Use mfn_eq()/mfn_add() rather than opencoded variations
    
    Use l1e_get_mfn() in place of l1e_get_pfn() when applicable, and fix up style
    on affected lines.
    
    For sh_remove_shadow_via_pointer(), map_domain_page() is guaranteed to succeed,
    so there is no need to ASSERT() its success.  This allows the pointer
    arithmetic to be folded into the previous expression, and for vaddr to be
    properly typed as l1_pgentry_t, avoiding the cast in l1e_get_mfn().
    
    No functional change.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/cpu/mcheck/vmce.c  |  2 +-
 xen/arch/x86/domain_page.c      |  2 +-
 xen/arch/x86/mm/hap/hap.c       |  3 ++-
 xen/arch/x86/mm/mem_sharing.c   |  4 ++--
 xen/arch/x86/mm/p2m-pod.c       |  2 +-
 xen/arch/x86/mm/p2m.c           |  4 ++--
 xen/arch/x86/mm/shadow/common.c | 34 ++++++++++++++++------------------
 xen/arch/x86/mm/shadow/multi.c  | 23 +++++++++++++----------
 8 files changed, 38 insertions(+), 36 deletions(-)
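
As background for the conversion below, here is a minimal, standalone sketch
approximating what the typesafe helpers used in this patch do.  The real
mfn_t, _mfn(), mfn_x(), mfn_eq() and mfn_add() definitions in the Xen tree
are generated via the TYPE_SAFE machinery and differ in detail, so treat this
model as illustration only:

/*
 * Illustration-only model of Xen's typesafe MFN helpers.  Compiles on its
 * own; it is not the hypervisor's actual implementation.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned long mfn; } mfn_t;            /* typesafe wrapper */

static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }

static inline bool mfn_eq(mfn_t x, mfn_t y) { return mfn_x(x) == mfn_x(y); }
static inline mfn_t mfn_add(mfn_t m, unsigned long i)
{
    return _mfn(mfn_x(m) + i);
}

int main(void)
{
    mfn_t omfn = _mfn(0x1000), nmfn = _mfn(0x1001);

    /* Opencoded comparison of the kind this patch removes ... */
    bool same_open = (mfn_x(omfn) == mfn_x(nmfn));
    /* ... and the typesafe equivalent it is converted to. */
    bool same_safe = mfn_eq(omfn, nmfn);

    printf("%d %d %#lx\n", same_open, same_safe, mfn_x(mfn_add(omfn, 1)));
    return 0;
}

Keeping MFNs behind a distinct wrapper type means mixing them up with GFNs or
raw integers is caught at compile time, which is why mfn_eq()/mfn_add() are
preferred over opencoded mfn_x() arithmetic.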

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 467125b327..302e13a14d 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -542,7 +542,7 @@ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
     r_mfn = get_gfn_query(d, gfn, &pt);
     if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES)
     {
-        ASSERT(mfn_x(r_mfn) == mfn_x(mfn));
+        ASSERT(mfn_eq(r_mfn, mfn));
         rc = p2m_change_type_one(d, gfn, pt, p2m_ram_broken);
     }
     put_gfn(d, gfn);
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 0c24530ed9..aee9a80720 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -101,7 +101,7 @@ void *map_domain_page(mfn_t mfn)
         ASSERT(idx < dcache->entries);
         hashent->refcnt++;
         ASSERT(hashent->refcnt);
-        ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(idx)) == mfn_x(mfn));
+        ASSERT(mfn_eq(l1e_get_mfn(MAPCACHE_L1ENT(idx)), mfn));
         goto out;
     }
 
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 812a8405df..d6449e6001 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -729,7 +729,8 @@ hap_write_p2m_entry(struct domain *d, unsigned long gfn, l1_pgentry_t *p,
          * unless the only change is an increase in access rights. */
         mfn_t omfn = l1e_get_mfn(*p);
         mfn_t nmfn = l1e_get_mfn(new);
-        flush_nestedp2m = !( mfn_x(omfn) == mfn_x(nmfn)
+
+        flush_nestedp2m = !(mfn_eq(omfn, nmfn)
             && perms_strictly_increased(old_flags, l1e_get_flags(new)) );
     }
 
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index fad8a9df13..5c08adb3ff 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -500,7 +500,7 @@ static int audit(void)
                 continue;
             }
             o_mfn = get_gfn_query_unlocked(d, g->gfn, &t); 
-            if ( mfn_x(o_mfn) != mfn_x(mfn) )
+            if ( !mfn_eq(o_mfn, mfn) )
             {
                 MEM_SHARING_DEBUG("Incorrect P2M for d=%hu, PFN=%lx."
                                   "Expecting MFN=%lx, got %lx\n",
@@ -904,7 +904,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
 
     /* This tricky business is to avoid two callers deadlocking if 
      * grabbing pages in opposite client/source order */
-    if( mfn_x(smfn) == mfn_x(cmfn) )
+    if ( mfn_eq(smfn, cmfn) )
     {
         /* The pages are already the same.  We could return some
          * kind of error here, but no matter how you look at it,
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 631e9aec33..ba37344ca0 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -75,7 +75,7 @@ p2m_pod_cache_add(struct p2m_domain *p2m,
     {
         struct domain * od;
 
-        p = mfn_to_page(_mfn(mfn_x(mfn) + i));
+        p = mfn_to_page(mfn_add(mfn, i));
         od = page_get_owner(p);
         if ( od != d )
         {
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 1089b86505..6020553c17 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1104,7 +1104,7 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l,
 
         for ( i = 0; i < (1UL << order); ++i )
         {
-            ASSERT(mfn_valid(_mfn(mfn_x(omfn) + i)));
+            ASSERT(mfn_valid(mfn_add(omfn, i)));
             set_gpfn_from_mfn(mfn_x(omfn) + i, INVALID_M2P_ENTRY);
         }
     }
@@ -1222,7 +1222,7 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn_l, mfn_t mfn,
                  "gfn_to_mfn failed! gfn=%08lx type:%d\n", gfn_l, t);
         goto out;
     }
-    if ( mfn_x(mfn) != mfn_x(actual_mfn) )
+    if ( !mfn_eq(mfn, actual_mfn) )
         gdprintk(XENLOG_WARNING,
                  "no mapping between mfn %08lx and gfn %08lx\n",
                  mfn_x(mfn), gfn_l);
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 1930a1d120..c54a0f2e09 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -318,10 +318,10 @@ void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn)
     {
         oos = v->arch.paging.shadow.oos;
         idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
-        if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
+        if ( !mfn_eq(oos[idx], gmfn) )
             idx = (idx + 1) % SHADOW_OOS_PAGES;
 
-        if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
+        if ( mfn_eq(oos[idx], gmfn) )
             return;
     }
 
@@ -389,15 +389,15 @@ void oos_fixup_add(struct domain *d, mfn_t gmfn,
         oos = v->arch.paging.shadow.oos;
         oos_fixup = v->arch.paging.shadow.oos_fixup;
         idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
-        if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
+        if ( !mfn_eq(oos[idx], gmfn) )
             idx = (idx + 1) % SHADOW_OOS_PAGES;
-        if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
+        if ( mfn_eq(oos[idx], gmfn) )
         {
             int i;
             for ( i = 0; i < SHADOW_OOS_FIXUPS; i++ )
             {
                 if ( mfn_valid(oos_fixup[idx].smfn[i])
-                     && (mfn_x(oos_fixup[idx].smfn[i]) == mfn_x(smfn))
+                     && mfn_eq(oos_fixup[idx].smfn[i], smfn)
                      && (oos_fixup[idx].off[i] == off) )
                     return;
             }
@@ -570,9 +570,9 @@ static void oos_hash_remove(struct domain *d, mfn_t gmfn)
     {
         oos = v->arch.paging.shadow.oos;
         idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
-        if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
+        if ( !mfn_eq(oos[idx], gmfn) )
             idx = (idx + 1) % SHADOW_OOS_PAGES;
-        if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
+        if ( mfn_eq(oos[idx], gmfn) )
         {
             oos[idx] = INVALID_MFN;
             return;
@@ -595,9 +595,9 @@ mfn_t oos_snapshot_lookup(struct domain *d, mfn_t gmfn)
         oos = v->arch.paging.shadow.oos;
         oos_snapshot = v->arch.paging.shadow.oos_snapshot;
         idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
-        if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
+        if ( !mfn_eq(oos[idx], gmfn) )
             idx = (idx + 1) % SHADOW_OOS_PAGES;
-        if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
+        if ( mfn_eq(oos[idx], gmfn) )
         {
             return oos_snapshot[idx];
         }
@@ -622,10 +622,10 @@ void sh_resync(struct domain *d, mfn_t gmfn)
         oos_fixup = v->arch.paging.shadow.oos_fixup;
         oos_snapshot = v->arch.paging.shadow.oos_snapshot;
         idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
-        if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
+        if ( !mfn_eq(oos[idx], gmfn) )
             idx = (idx + 1) % SHADOW_OOS_PAGES;
 
-        if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
+        if ( mfn_eq(oos[idx], gmfn) )
         {
             _sh_resync(v, gmfn, &oos_fixup[idx], oos_snapshot[idx]);
             oos[idx] = INVALID_MFN;
@@ -2231,7 +2231,7 @@ static int sh_remove_shadow_via_pointer(struct domain *d, mfn_t smfn)
 {
     struct page_info *sp = mfn_to_page(smfn);
     mfn_t pmfn;
-    void *vaddr;
+    l1_pgentry_t *vaddr;
     int rc;
 
     ASSERT(sp->u.sh.type > 0);
@@ -2241,10 +2241,8 @@ static int sh_remove_shadow_via_pointer(struct domain *d, mfn_t smfn)
     if (sp->up == 0) return 0;
     pmfn = maddr_to_mfn(sp->up);
     ASSERT(mfn_valid(pmfn));
-    vaddr = map_domain_page(pmfn);
-    ASSERT(vaddr);
-    vaddr += sp->up & (PAGE_SIZE-1);
-    ASSERT(l1e_get_pfn(*(l1_pgentry_t *)vaddr) == mfn_x(smfn));
+    vaddr = map_domain_page(pmfn) + (sp->up & (PAGE_SIZE - 1));
+    ASSERT(mfn_eq(l1e_get_mfn(*vaddr), smfn));
 
     /* Is this the only reference to this shadow? */
     rc = (sp->u.sh.count == 1) ? 1 : 0;
@@ -3128,7 +3126,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
             {
                 if ( !npte
                      || !p2m_is_ram(p2m_flags_to_type(l1e_get_flags(npte[i])))
-                     || l1e_get_pfn(npte[i]) != mfn_x(omfn) )
+                     || !mfn_eq(l1e_get_mfn(npte[i]), omfn) )
                 {
                     /* This GFN->MFN mapping has gone away */
                     sh_remove_all_shadows_and_parents(d, omfn);
@@ -3136,7 +3134,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
                                                 _gfn(gfn + (i << PAGE_SHIFT))) )
                         cpumask_or(&flushmask, &flushmask, d->dirty_cpumask);
                 }
-                omfn = _mfn(mfn_x(omfn) + 1);
+                omfn = mfn_add(omfn, 1);
             }
             flush_tlb_mask(&flushmask);
 
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 9e43533f69..569d18eb7e 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -960,7 +960,8 @@ static int shadow_set_l4e(struct domain *d,
     {
         /* We lost a reference to an old mfn. */
         mfn_t osl3mfn = shadow_l4e_get_mfn(old_sl4e);
-        if ( (mfn_x(osl3mfn) != mfn_x(shadow_l4e_get_mfn(new_sl4e)))
+
+        if ( !mfn_eq(osl3mfn, shadow_l4e_get_mfn(new_sl4e))
              || !perms_strictly_increased(shadow_l4e_get_flags(old_sl4e),
                                           shadow_l4e_get_flags(new_sl4e)) )
         {
@@ -1006,7 +1007,8 @@ static int shadow_set_l3e(struct domain *d,
     {
         /* We lost a reference to an old mfn. */
         mfn_t osl2mfn = shadow_l3e_get_mfn(old_sl3e);
-        if ( (mfn_x(osl2mfn) != mfn_x(shadow_l3e_get_mfn(new_sl3e))) ||
+
+        if ( !mfn_eq(osl2mfn, shadow_l3e_get_mfn(new_sl3e)) ||
              !perms_strictly_increased(shadow_l3e_get_flags(old_sl3e),
                                        shadow_l3e_get_flags(new_sl3e)) )
         {
@@ -1091,7 +1093,8 @@ static int shadow_set_l2e(struct domain *d,
     {
         /* We lost a reference to an old mfn. */
         mfn_t osl1mfn = shadow_l2e_get_mfn(old_sl2e);
-        if ( (mfn_x(osl1mfn) != mfn_x(shadow_l2e_get_mfn(new_sl2e))) ||
+
+        if ( !mfn_eq(osl1mfn, shadow_l2e_get_mfn(new_sl2e)) ||
              !perms_strictly_increased(shadow_l2e_get_flags(old_sl2e),
                                        shadow_l2e_get_flags(new_sl2e)) )
         {
@@ -2447,7 +2450,7 @@ sh_map_and_validate(struct vcpu *v, mfn_t gmfn,
         smfn2 = smfn;
         guest_idx = guest_index(new_gp);
         shadow_idx = shadow_index(&smfn2, guest_idx);
-        if ( mfn_x(smfn2) != mfn_x(map_mfn) )
+        if ( !mfn_eq(smfn2, map_mfn) )
         {
             /* We have moved to another page of the shadow */
             map_mfn = smfn2;
@@ -4282,7 +4285,7 @@ int sh_rm_write_access_from_sl1p(struct domain *d, mfn_t gmfn,
     sl1e = *sl1p;
     if ( ((shadow_l1e_get_flags(sl1e) & (_PAGE_PRESENT|_PAGE_RW))
           != (_PAGE_PRESENT|_PAGE_RW))
-         || (mfn_x(shadow_l1e_get_mfn(sl1e)) != mfn_x(gmfn)) )
+         || !mfn_eq(shadow_l1e_get_mfn(sl1e), gmfn) )
     {
         unmap_domain_page(sl1p);
         goto fail;
@@ -4351,7 +4354,7 @@ static int sh_guess_wrmap(struct vcpu *v, unsigned long vaddr, mfn_t gmfn)
     sl1e = *sl1p;
     if ( ((shadow_l1e_get_flags(sl1e) & (_PAGE_PRESENT|_PAGE_RW))
           != (_PAGE_PRESENT|_PAGE_RW))
-         || (mfn_x(shadow_l1e_get_mfn(sl1e)) != mfn_x(gmfn)) )
+         || !mfn_eq(shadow_l1e_get_mfn(sl1e), gmfn) )
         return 0;
 
     /* Found it!  Need to remove its write permissions. */
@@ -4763,7 +4766,7 @@ int sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
                 gfn = guest_l1e_get_gfn(*gl1e);
                 mfn = shadow_l1e_get_mfn(*sl1e);
                 gmfn = get_gfn_query_unlocked(v->domain, gfn_x(gfn), &p2mt);
-                if ( !p2m_is_grant(p2mt) && mfn_x(gmfn) != mfn_x(mfn) )
+                if ( !p2m_is_grant(p2mt) && !mfn_eq(gmfn, mfn) )
                     AUDIT_FAIL(1, "bad translation: gfn %" SH_PRI_gfn
                                " --> %" PRI_mfn " != mfn %" PRI_mfn,
                                gfn_x(gfn), mfn_x(gmfn), mfn_x(mfn));
@@ -4837,7 +4840,7 @@ int sh_audit_l2_table(struct vcpu *v, mfn_t sl2mfn, mfn_t x)
                 : get_shadow_status(d,
                     get_gfn_query_unlocked(d, gfn_x(gfn),
                                         &p2mt), SH_type_l1_shadow);
-            if ( mfn_x(gmfn) != mfn_x(mfn) )
+            if ( !mfn_eq(gmfn, mfn) )
                 AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
                            " (--> %" PRI_mfn ")"
                            " --> %" PRI_mfn " != mfn %" PRI_mfn,
@@ -4892,7 +4895,7 @@ int sh_audit_l3_table(struct vcpu *v, mfn_t sl3mfn, mfn_t x)
                                       && (guest_index(gl3e) % 4) == 3)
                                      ? SH_type_l2h_shadow
                                      : SH_type_l2_shadow);
-            if ( mfn_x(gmfn) != mfn_x(mfn) )
+            if ( !mfn_eq(gmfn, mfn) )
                 AUDIT_FAIL(3, "bad translation: gfn %" SH_PRI_gfn
                            " --> %" PRI_mfn " != mfn %" PRI_mfn,
                            gfn_x(gfn), mfn_x(gmfn), mfn_x(mfn));
@@ -4937,7 +4940,7 @@ int sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x)
             gmfn = get_shadow_status(d, get_gfn_query_unlocked(
                                      d, gfn_x(gfn), &p2mt),
                                      SH_type_l3_shadow);
-            if ( mfn_x(gmfn) != mfn_x(mfn) )
+            if ( !mfn_eq(gmfn, mfn) )
                 AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn
                            " --> %" PRI_mfn " != mfn %" PRI_mfn,
                            gfn_x(gfn), mfn_x(gmfn), mfn_x(mfn));
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
