
[Xen-devel] [PATCH 3/7] xen/x86: Use maddr_to_page and maddr_to_mfn to avoid open-coded >> PAGE_SHIFT



The constructions _mfn(... >> PAGE_SHIFT) and mfn_to_page(... >> PAGE_SHIFT)
can respectively be replaced by maddr_to_mfn(...) and maddr_to_page(...).
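
For reference, the typed helpers expand to the same shift; a minimal
sketch, assuming the usual Xen macro definitions (the exact forms live
in the tree's headers and may differ slightly per architecture):

    /* Sketch only -- not the verbatim definitions from the tree. */
    #define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
    #define maddr_to_mfn(ma)  _mfn(paddr_to_pfn(ma))
    #define maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma))

Beyond brevity, going through maddr_to_mfn() yields a typed mfn_t, so
accidentally mixing MFNs with raw machine addresses is caught at build
time when the type-safe wrappers are in effect.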

Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>

---

Cc: Elena Ufimtseva <elena.ufimtseva@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/debug.c            | 2 +-
 xen/arch/x86/mm/shadow/common.c | 2 +-
 xen/arch/x86/mm/shadow/multi.c  | 6 +++---
 xen/common/kimage.c             | 6 +++---
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
index 1c10b84a16..9159f32db4 100644
--- a/xen/arch/x86/debug.c
+++ b/xen/arch/x86/debug.c
@@ -98,7 +98,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
     l2_pgentry_t l2e, *l2t;
     l1_pgentry_t l1e, *l1t;
     unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
-    mfn_t mfn = _mfn(cr3 >> PAGE_SHIFT);
+    mfn_t mfn = maddr_to_mfn(cr3);
 
     DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id, 
           cr3, pgd3val);
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 86186cccdf..f65d2a6523 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2640,7 +2640,7 @@ static int sh_remove_shadow_via_pointer(struct domain *d, mfn_t smfn)
     ASSERT(sh_type_has_up_pointer(d, sp->u.sh.type));
 
     if (sp->up == 0) return 0;
-    pmfn = _mfn(sp->up >> PAGE_SHIFT);
+    pmfn = maddr_to_mfn(sp->up);
     ASSERT(mfn_valid(pmfn));
     vaddr = map_domain_page(pmfn);
     ASSERT(vaddr);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 28030acbf6..1e42e1d8ab 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2425,7 +2425,7 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
     sp = mfn_to_page(smfn);
     if ( sp->u.sh.count != 1 || !sp->up )
         return 0;
-    smfn = _mfn(sp->up >> PAGE_SHIFT);
+    smfn = maddr_to_mfn(sp->up);
     ASSERT(mfn_valid(smfn));
 
 #if (SHADOW_PAGING_LEVELS == 4)
@@ -2434,7 +2434,7 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
     ASSERT(sh_type_has_up_pointer(d, SH_type_l2_shadow));
     if ( sp->u.sh.count != 1 || !sp->up )
         return 0;
-    smfn = _mfn(sp->up >> PAGE_SHIFT);
+    smfn = maddr_to_mfn(sp->up);
     ASSERT(mfn_valid(smfn));
 
     /* up to l4 */
@@ -2442,7 +2442,7 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
     if ( sp->u.sh.count != 1
          || !sh_type_has_up_pointer(d, SH_type_l3_64_shadow) || !sp->up )
         return 0;
-    smfn = _mfn(sp->up >> PAGE_SHIFT);
+    smfn = maddr_to_mfn(sp->up);
     ASSERT(mfn_valid(smfn));
 #endif
 
diff --git a/xen/common/kimage.c b/xen/common/kimage.c
index cf624d10fd..ebc71affd1 100644
--- a/xen/common/kimage.c
+++ b/xen/common/kimage.c
@@ -504,7 +504,7 @@ static void kimage_free_entry(kimage_entry_t entry)
 {
     struct page_info *page;
 
-    page = mfn_to_page(entry >> PAGE_SHIFT);
+    page = maddr_to_page(entry);
     free_domheap_page(page);
 }
 
@@ -636,8 +636,8 @@ static struct page_info *kimage_alloc_page(struct kexec_image *image,
         if ( old )
         {
             /* If so move it. */
-            mfn_t old_mfn = _mfn(*old >> PAGE_SHIFT);
-            mfn_t mfn = _mfn(addr >> PAGE_SHIFT);
+            mfn_t old_mfn = maddr_to_mfn(*old);
+            mfn_t mfn = maddr_to_mfn(addr);
 
             copy_domain_page(mfn, old_mfn);
             clear_domain_page(old_mfn);
-- 
2.11.0

