
[Xen-devel] [PATCH 11/14] xen/mm: Switch mfn_to_virt()/virt_to_mfn() to using mfn_t



A majority of users either override the helpers to use mfn_t anyway, or
already have an mfn_t in their hands.

Update the API, and adjust all users to match.  Use the unsigned long
variants only where we are interacting with an external struct and there is
no chance of it switching to mfn_t.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
---
 xen/arch/arm/alternative.c          |  4 ----
 xen/arch/arm/cpuerrata.c            |  4 ----
 xen/arch/arm/domain_build.c         |  4 ----
 xen/arch/arm/livepatch.c            |  4 ----
 xen/arch/arm/mm.c                   |  6 ------
 xen/arch/arm/setup.c                |  4 ++--
 xen/arch/x86/domain.c               |  2 +-
 xen/arch/x86/domain_page.c          | 10 +++++-----
 xen/arch/x86/guest/xen.c            |  2 +-
 xen/arch/x86/hvm/dom0_build.c       |  4 ++--
 xen/arch/x86/mm.c                   | 15 ++++++---------
 xen/arch/x86/numa.c                 |  2 +-
 xen/arch/x86/pv/descriptor-tables.c |  2 +-
 xen/arch/x86/pv/dom0_build.c        |  5 ++---
 xen/arch/x86/pv/shim.c              |  3 ---
 xen/arch/x86/setup.c                | 10 +++++-----
 xen/arch/x86/srat.c                 |  2 +-
 xen/arch/x86/tboot.c                |  4 ++--
 xen/arch/x86/x86_64/mm.c            | 10 +++++-----
 xen/common/domctl.c                 |  2 +-
 xen/common/efi/boot.c               |  7 ++++---
 xen/common/grant_table.c            |  4 ++--
 xen/common/page_alloc.c             | 10 +++++-----
 xen/common/tmem.c                   |  2 +-
 xen/common/trace.c                  | 20 +++++++++++---------
 xen/common/xenoprof.c               |  4 ----
 xen/drivers/acpi/osl.c              |  2 +-
 xen/include/asm-arm/mm.h            |  6 +++---
 xen/include/asm-x86/mm.h            |  2 +-
 xen/include/asm-x86/page.h          |  4 ++--
 xen/include/xen/domain_page.h       |  6 +++---
 31 files changed, 68 insertions(+), 98 deletions(-)

diff --git a/xen/arch/arm/alternative.c b/xen/arch/arm/alternative.c
index 52ed7ed..22ab4e2 100644
--- a/xen/arch/arm/alternative.c
+++ b/xen/arch/arm/alternative.c
@@ -34,10 +34,6 @@
 #include <asm/livepatch.h>
 #include <asm/page.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 extern const struct alt_instr __alt_instructions[], __alt_instructions_end[];
 
 struct alt_region {
diff --git a/xen/arch/arm/cpuerrata.c b/xen/arch/arm/cpuerrata.c
index adf88e7..992c3a7 100644
--- a/xen/arch/arm/cpuerrata.c
+++ b/xen/arch/arm/cpuerrata.c
@@ -13,10 +13,6 @@
 #include <asm/insn.h>
 #include <asm/psci.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 /* Hardening Branch predictor code for Arm64 */
 #ifdef CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR
 
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index b0ec3f0..e201787 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -47,10 +47,6 @@ struct map_range_data
     p2m_type_t p2mt;
 };
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 //#define DEBUG_11_ALLOCATION
 #ifdef DEBUG_11_ALLOCATION
 # define D11PRINT(fmt, args...) printk(XENLOG_DEBUG fmt, ##args)
diff --git a/xen/arch/arm/livepatch.c b/xen/arch/arm/livepatch.c
index 279d52c..a8c6e24 100644
--- a/xen/arch/arm/livepatch.c
+++ b/xen/arch/arm/livepatch.c
@@ -12,10 +12,6 @@
 #include <asm/livepatch.h>
 #include <asm/mm.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 void *vmap_of_xen_text;
 
 int arch_livepatch_quiesce(void)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 7a06a33..9db69e6 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -44,12 +44,6 @@
 
 struct domain *dom_xen, *dom_io, *dom_cow;
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-#undef mfn_to_virt
-#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
-
 /* Static start-of-day pagetables that we use before the allocators
  * are up. These are used by all CPUs during bringup before switching
  * to the CPUs own pagetables.
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index e83221a..77d19a8 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -655,7 +655,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
     init_boot_pages(pfn_to_paddr(boot_mfn_start), pfn_to_paddr(boot_mfn_end));
 
     /* Copy the DTB. */
-    fdt = mfn_to_virt(mfn_x(alloc_boot_pages(dtb_pages, 1)));
+    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
     copy_from_paddr(fdt, dtb_paddr, dtb_size);
     device_tree_flattened = fdt;
 
@@ -765,7 +765,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
     dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
 
     /* Copy the DTB. */
-    fdt = mfn_to_virt(mfn_x(alloc_boot_pages(dtb_pages, 1)));
+    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
     copy_from_paddr(fdt, dtb_paddr, dtb_size);
     device_tree_flattened = fdt;
 
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 327c961..14902fe 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1709,7 +1709,7 @@ static void __context_switch(void)
                                     per_cpu(compat_gdt_table, cpu);
     if ( need_full_gdt(nd) )
     {
-        unsigned long mfn = virt_to_mfn(gdt);
+        unsigned long mfn = mfn_x(virt_to_mfn(gdt));
         l1_pgentry_t *pl1e = pv_gdt_ptes(n);
         unsigned int i;
 
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 4a07cfb..0d7cafc 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -78,17 +78,17 @@ void *map_domain_page(mfn_t mfn)
 
 #ifdef NDEBUG
     if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
-        return mfn_to_virt(mfn_x(mfn));
+        return mfn_to_virt(mfn);
 #endif
 
     v = mapcache_current_vcpu();
     if ( !v || !is_pv_vcpu(v) )
-        return mfn_to_virt(mfn_x(mfn));
+        return mfn_to_virt(mfn);
 
     dcache = &v->domain->arch.pv.mapcache;
     vcache = &v->arch.pv.mapcache;
     if ( !dcache->inuse )
-        return mfn_to_virt(mfn_x(mfn));
+        return mfn_to_virt(mfn);
 
     perfc_incr(map_domain_page_count);
 
@@ -311,7 +311,7 @@ void *map_domain_page_global(mfn_t mfn)
 
 #ifdef NDEBUG
     if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
-        return mfn_to_virt(mfn_x(mfn));
+        return mfn_to_virt(mfn);
 #endif
 
     return vmap(&mfn, 1);
@@ -336,7 +336,7 @@ mfn_t domain_page_map_to_mfn(const void *ptr)
     const l1_pgentry_t *pl1e;
 
     if ( va >= DIRECTMAP_VIRT_START )
-        return _mfn(virt_to_mfn(ptr));
+        return virt_to_mfn(ptr);
 
     if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
     {
diff --git a/xen/arch/x86/guest/xen.c b/xen/arch/x86/guest/xen.c
index 8cee880..b2c7306 100644
--- a/xen/arch/x86/guest/xen.c
+++ b/xen/arch/x86/guest/xen.c
@@ -131,7 +131,7 @@ static int map_vcpuinfo(void)
         return 0;
     }
 
-    info.mfn = virt_to_mfn(&vcpu_info[vcpu]);
+    info.mfn = __virt_to_mfn(&vcpu_info[vcpu]);
     info.offset = (unsigned long)&vcpu_info[vcpu] & ~PAGE_MASK;
     rc = xen_hypercall_vcpu_op(VCPUOP_register_vcpu_info, vcpu, &info);
     if ( rc )
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 3e29cd3..88bb4ca 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -403,7 +403,7 @@ static int __init pvh_setup_p2m(struct domain *d)
                                  d->arch.e820[i].addr + d->arch.e820[i].size);
             enum hvm_translation_result res =
                  hvm_copy_to_guest_phys(mfn_to_maddr(_mfn(addr)),
-                                        mfn_to_virt(addr),
+                                        mfn_to_virt(_mfn(addr)),
                                         d->arch.e820[i].addr - end,
                                         v);
 
@@ -489,7 +489,7 @@ static int __init pvh_load_kernel(struct domain *d, const module_t *image,
 
     if ( initrd != NULL )
     {
-        rc = hvm_copy_to_guest_phys(last_addr, mfn_to_virt(initrd->mod_start),
+        rc = hvm_copy_to_guest_phys(last_addr, __mfn_to_virt(initrd->mod_start),
                                     initrd->mod_end, v);
         if ( rc )
         {
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c3c7628..901c56f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -132,10 +132,6 @@
 
 #include "pv/mm.h"
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(v) _mfn(__virt_to_mfn(v))
-
 /* Mapping of the fixmap space needed early. */
 l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
     l1_fixmap[L1_PAGETABLE_ENTRIES];
@@ -330,8 +326,8 @@ void __init arch_init_memory(void)
         iostart_pfn = max_t(unsigned long, pfn, 1UL << (20 - PAGE_SHIFT));
         ioend_pfn = min(rstart_pfn, 16UL << (20 - PAGE_SHIFT));
         if ( iostart_pfn < ioend_pfn )
-            destroy_xen_mappings((unsigned long)mfn_to_virt(iostart_pfn),
-                                 (unsigned long)mfn_to_virt(ioend_pfn));
+            destroy_xen_mappings((unsigned long)mfn_to_virt(_mfn(iostart_pfn)),
+                                 (unsigned long)mfn_to_virt(_mfn(ioend_pfn)));
 
         /* Mark as I/O up to next RAM region. */
         for ( ; pfn < rstart_pfn; pfn++ )
@@ -833,8 +829,9 @@ static int update_xen_mappings(unsigned long mfn, unsigned int cacheattr)
     if ( unlikely(alias) && cacheattr )
         err = map_pages_to_xen(xen_va, _mfn(mfn), 1, 0);
     if ( !err )
-        err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), _mfn(mfn), 1,
-                     PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
+        err = map_pages_to_xen(
+            (unsigned long)mfn_to_virt(_mfn(mfn)), _mfn(mfn), 1,
+            PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
     if ( unlikely(alias) && !cacheattr && !err )
         err = map_pages_to_xen(xen_va, _mfn(mfn), 1, PAGE_HYPERVISOR);
     return err;
@@ -4769,7 +4766,7 @@ void *alloc_xen_pagetable(void)
         return ptr;
     }
 
-    return mfn_to_virt(mfn_x(alloc_boot_pages(1, 1)));
+    return mfn_to_virt(alloc_boot_pages(1, 1));
 }
 
 void free_xen_pagetable(void *v)
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index b3c9c12..d18164b 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -101,7 +101,7 @@ static int __init allocate_cachealigned_memnodemap(void)
     unsigned long size = PFN_UP(memnodemapsize * sizeof(*memnodemap));
     unsigned long mfn = mfn_x(alloc_boot_pages(size, 1));
 
-    memnodemap = mfn_to_virt(mfn);
+    memnodemap = mfn_to_virt(_mfn(mfn));
     mfn <<= PAGE_SHIFT;
     size <<= PAGE_SHIFT;
     printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c
index 8b2d55f..1950c39 100644
--- a/xen/arch/x86/pv/descriptor-tables.c
+++ b/xen/arch/x86/pv/descriptor-tables.c
@@ -76,7 +76,7 @@ bool pv_destroy_ldt(struct vcpu *v)
 void pv_destroy_gdt(struct vcpu *v)
 {
     l1_pgentry_t *pl1e = pv_gdt_ptes(v);
-    mfn_t zero_mfn = _mfn(virt_to_mfn(zero_page));
+    mfn_t zero_mfn = virt_to_mfn(zero_page);
     l1_pgentry_t zero_l1e = l1e_from_mfn(zero_mfn, __PAGE_HYPERVISOR_RO);
     unsigned int i;
 
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index dc3c1e1..145488d 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -528,7 +528,7 @@ int __init dom0_construct_pv(struct domain *d,
                     free_domheap_pages(page, order);
                     page += 1UL << order;
                 }
-            memcpy(page_to_virt(page), mfn_to_virt(initrd->mod_start),
+            memcpy(page_to_virt(page), __mfn_to_virt(initrd->mod_start),
                    initrd_len);
             mpt_alloc = (paddr_t)initrd->mod_start << PAGE_SHIFT;
             init_domheap_pages(mpt_alloc,
@@ -617,8 +617,7 @@ int __init dom0_construct_pv(struct domain *d,
         l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
     }
     clear_page(l4tab);
-    init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)),
-                      d, INVALID_MFN, true);
+    init_xen_l4_slots(l4tab, virt_to_mfn(l4start), d, INVALID_MFN, true);
     v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
     if ( is_pv_32bit_domain(d) )
         v->arch.guest_table_user = v->arch.guest_table;
diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index cdc72f7..b89fa46 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -37,9 +37,6 @@
 
 #include <compat/grant_table.h>
 
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 #ifndef CONFIG_PV_SHIM_EXCLUSIVE
 bool pv_shim;
 boolean_param("pv-shim", pv_shim);
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 9cbff22..f3345d0 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -343,7 +343,7 @@ void *__init bootstrap_map(const module_t *mod)
     void *ret;
 
     if ( system_state != SYS_STATE_early_boot )
-        return mod ? mfn_to_virt(mod->mod_start) : NULL;
+        return mod ? __mfn_to_virt(mod->mod_start) : NULL;
 
     if ( !mod )
     {
@@ -970,7 +970,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
          * This needs to remain in sync with xen_in_range() and the
          * respective reserve_e820_ram() invocation below.
          */
-        mod[mbi->mods_count].mod_start = virt_to_mfn(_stext);
+        mod[mbi->mods_count].mod_start = __virt_to_mfn(_stext);
         mod[mbi->mods_count].mod_end = __2M_rwdata_end - _stext;
     }
 
@@ -1363,7 +1363,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
     {
         set_pdx_range(mod[i].mod_start,
                       mod[i].mod_start + PFN_UP(mod[i].mod_end));
-        map_pages_to_xen((unsigned long)mfn_to_virt(mod[i].mod_start),
+        map_pages_to_xen((unsigned long)__mfn_to_virt(mod[i].mod_start),
                          _mfn(mod[i].mod_start),
                          PFN_UP(mod[i].mod_end), PAGE_HYPERVISOR);
     }
@@ -1453,9 +1453,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
 
     numa_initmem_init(0, raw_max_page);
 
-    if ( max_page - 1 > virt_to_mfn(HYPERVISOR_VIRT_END - 1) )
+    if ( max_page - 1 > __virt_to_mfn(HYPERVISOR_VIRT_END - 1) )
     {
-        unsigned long limit = virt_to_mfn(HYPERVISOR_VIRT_END - 1);
+        unsigned long limit = __virt_to_mfn(HYPERVISOR_VIRT_END - 1);
         uint64_t mask = PAGE_SIZE - 1;
 
         if ( !highmem_start )
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index 2d70b45..8ca1b65 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -196,7 +196,7 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
                return;
        }
        mfn = alloc_boot_pages(PFN_UP(slit->header.length), 1);
-       acpi_slit = mfn_to_virt(mfn_x(mfn));
+       acpi_slit = mfn_to_virt(mfn);
        memcpy(acpi_slit, slit, slit->header.length);
 }
 
diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
index f3fdee4..221be1d 100644
--- a/xen/arch/x86/tboot.c
+++ b/xen/arch/x86/tboot.c
@@ -259,7 +259,7 @@ static int mfn_in_guarded_stack(unsigned long mfn)
             continue;
         p = (void *)((unsigned long)stack_base[i] + STACK_SIZE -
                      PRIMARY_STACK_SIZE - PAGE_SIZE);
-        if ( mfn == virt_to_mfn(p) )
+        if ( mfn == mfn_x(virt_to_mfn(p)) )
             return -1;
     }
 
@@ -295,7 +295,7 @@ static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE],
             if ( mfn_in_guarded_stack(mfn) )
                continue; /* skip guard stack, see memguard_guard_stack() in mm.c */
 
-            pg = mfn_to_virt(mfn);
+            pg = mfn_to_virt(_mfn(mfn));
             vmac_update((uint8_t *)pg, PAGE_SIZE, &ctx);
         }
     }
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 11977f2..863c49a 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1366,10 +1366,10 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
         return -EINVAL;
     }
 
-    i = virt_to_mfn(HYPERVISOR_VIRT_END - 1) + 1;
+    i = mfn_x(virt_to_mfn(HYPERVISOR_VIRT_END - 1)) + 1;
     if ( spfn < i )
     {
-        ret = map_pages_to_xen((unsigned long)mfn_to_virt(spfn), _mfn(spfn),
+        ret = map_pages_to_xen((unsigned long)mfn_to_virt(_mfn(spfn)), _mfn(spfn),
                                min(epfn, i) - spfn, PAGE_HYPERVISOR);
         if ( ret )
             goto destroy_directmap;
@@ -1378,7 +1378,7 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
     {
         if ( i < spfn )
             i = spfn;
-        ret = map_pages_to_xen((unsigned long)mfn_to_virt(i), _mfn(i),
+        ret = map_pages_to_xen((unsigned long)mfn_to_virt(_mfn(i)), _mfn(i),
                                epfn - i, __PAGE_HYPERVISOR_RW);
         if ( ret )
             goto destroy_directmap;
@@ -1469,8 +1469,8 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
     NODE_DATA(node)->node_start_pfn = old_node_start;
     NODE_DATA(node)->node_spanned_pages = old_node_span;
  destroy_directmap:
-    destroy_xen_mappings((unsigned long)mfn_to_virt(spfn),
-                         (unsigned long)mfn_to_virt(epfn));
+    destroy_xen_mappings((unsigned long)mfn_to_virt(_mfn(spfn)),
+                         (unsigned long)mfn_to_virt(_mfn(epfn)));
 
     return ret;
 }
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index d08b627..5500dfb 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -205,7 +205,7 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
     info->outstanding_pages = d->outstanding_pages;
     info->shr_pages         = atomic_read(&d->shr_pages);
     info->paged_pages       = atomic_read(&d->paged_pages);
-    info->shared_info_frame = mfn_to_gmfn(d, virt_to_mfn(d->shared_info));
+    info->shared_info_frame = mfn_to_gmfn(d, mfn_x(virt_to_mfn(d->shared_info)));
     BUG_ON(SHARED_M2P(info->shared_info_frame));
 
     info->cpupool = d->cpupool ? d->cpupool->cpupool_id : CPUPOOLID_NONE;
diff --git a/xen/common/efi/boot.c b/xen/common/efi/boot.c
index 2ed5403..07fe775 100644
--- a/xen/common/efi/boot.c
+++ b/xen/common/efi/boot.c
@@ -1431,7 +1431,7 @@ static __init void copy_mapping(unsigned long mfn, unsigned long end,
     {
         l4_pgentry_t l4e = efi_l4_pgtable[l4_table_offset(mfn << PAGE_SHIFT)];
         l3_pgentry_t *l3src, *l3dst;
-        unsigned long va = (unsigned long)mfn_to_virt(mfn);
+        unsigned long va = (unsigned long)mfn_to_virt(_mfn(mfn));
 
         next = mfn + (1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT));
         if ( !is_valid(mfn, min(next, end)) )
@@ -1551,9 +1551,10 @@ void __init efi_init_memory(void)
              !(smfn & pfn_hole_mask) &&
              !((smfn ^ (emfn - 1)) & ~pfn_pdx_bottom_mask) )
         {
-            if ( (unsigned long)mfn_to_virt(emfn - 1) >= HYPERVISOR_VIRT_END )
+            if ( (unsigned long)mfn_to_virt(_mfn(emfn - 1)) >=
+                 HYPERVISOR_VIRT_END )
                 prot &= ~_PAGE_GLOBAL;
-            if ( map_pages_to_xen((unsigned long)mfn_to_virt(smfn),
+            if ( map_pages_to_xen((unsigned long)mfn_to_virt(_mfn(smfn)),
                                   _mfn(smfn), emfn - smfn, prot) == 0 )
                 desc->VirtualStart =
                     (unsigned long)maddr_to_virt(desc->PhysicalStart);
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 54f909f..6fc26cf 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -3835,7 +3835,7 @@ static int gnttab_get_status_frame_mfn(struct domain *d,
             return -EINVAL;
     }
 
-    *mfn = _mfn(virt_to_mfn(gt->status[idx]));
+    *mfn = virt_to_mfn(gt->status[idx]);
     return 0;
 }
 
@@ -3864,7 +3864,7 @@ static int gnttab_get_shared_frame_mfn(struct domain *d,
             return -EINVAL;
     }
 
-    *mfn = _mfn(virt_to_mfn(gt->shared_raw[idx]));
+    *mfn = virt_to_mfn(gt->shared_raw[idx]);
     return 0;
 }
 
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 08ee8cf..92e1a65 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -253,7 +253,7 @@ static void __init bootmem_region_add(unsigned long s, unsigned long e)
     unsigned int i;
 
     if ( (bootmem_region_list == NULL) && (s < e) )
-        bootmem_region_list = mfn_to_virt(s++);
+        bootmem_region_list = mfn_to_virt(_mfn(s++));
 
     if ( s >= e )
         return;
@@ -590,8 +590,8 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
               (!xenheap_bits ||
                !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
     {
-        _heap[node] = mfn_to_virt(mfn + nr - needed);
-        avail[node] = mfn_to_virt(mfn + nr - 1) +
+        _heap[node] = mfn_to_virt(_mfn(mfn + nr - needed));
+        avail[node] = mfn_to_virt(_mfn(mfn + nr - 1)) +
                       PAGE_SIZE - sizeof(**avail) * NR_ZONES;
     }
     else if ( nr >= needed &&
@@ -599,8 +599,8 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
               (!xenheap_bits ||
                !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
     {
-        _heap[node] = mfn_to_virt(mfn);
-        avail[node] = mfn_to_virt(mfn + needed - 1) +
+        _heap[node] = mfn_to_virt(_mfn(mfn));
+        avail[node] = mfn_to_virt(_mfn(mfn + needed - 1)) +
                       PAGE_SIZE - sizeof(**avail) * NR_ZONES;
         *use_tail = 0;
     }
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index c077f87..e749cba 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -243,7 +243,7 @@ static void tmem_persistent_pool_page_put(void *page_va)
     struct page_info *pi;
 
     ASSERT(IS_PAGE_ALIGNED(page_va));
-    pi = mfn_to_page(_mfn(virt_to_mfn(page_va)));
+    pi = virt_to_page(page_va);
     ASSERT(IS_VALID_PAGE(pi));
     __tmem_free_page_thispool(pi);
 }
diff --git a/xen/common/trace.c b/xen/common/trace.c
index cc294fc..3e43976 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -217,7 +217,7 @@ static int alloc_trace_bufs(unsigned int pages)
                 t_info_mfn_list[offset + i] = 0;
                 goto out_dealloc;
             }
-            t_info_mfn_list[offset + i] = virt_to_mfn(p);
+            t_info_mfn_list[offset + i] = __virt_to_mfn(p);
         }
     }
 
@@ -233,7 +233,7 @@ static int alloc_trace_bufs(unsigned int pages)
         offset = t_info->mfn_offset[cpu];
 
         /* Initialize the buffer metadata */
-        per_cpu(t_bufs, cpu) = buf = mfn_to_virt(t_info_mfn_list[offset]);
+        per_cpu(t_bufs, cpu) = buf = __mfn_to_virt(t_info_mfn_list[offset]);
         buf->cons = buf->prod = 0;
 
         printk(XENLOG_INFO "xentrace: p%d mfn %x offset %u\n",
@@ -268,10 +268,12 @@ static int alloc_trace_bufs(unsigned int pages)
             continue;
         for ( i = 0; i < pages; i++ )
         {
-            uint32_t mfn = t_info_mfn_list[offset + i];
-            if ( !mfn )
+            mfn_t mfn = _mfn(t_info_mfn_list[offset + i]);
+
+            if ( !mfn_x(mfn) )
                 break;
-            ASSERT(!(mfn_to_page(_mfn(mfn))->count_info & PGC_allocated));
+
+            ASSERT(!(mfn_to_page(mfn)->count_info & PGC_allocated));
             free_xenheap_pages(mfn_to_virt(mfn), 0);
         }
     }
@@ -377,7 +379,7 @@ int tb_control(struct xen_sysctl_tbuf_op *tbc)
     {
     case XEN_SYSCTL_TBUFOP_get_info:
         tbc->evt_mask   = tb_event_mask;
-        tbc->buffer_mfn = t_info ? virt_to_mfn(t_info) : 0;
+        tbc->buffer_mfn = t_info ? __virt_to_mfn(t_info) : 0;
         tbc->size = t_info_pages * PAGE_SIZE;
         break;
     case XEN_SYSCTL_TBUFOP_set_cpu_mask:
@@ -511,7 +513,7 @@ static unsigned char *next_record(const struct t_buf *buf, uint32_t *next,
     uint16_t per_cpu_mfn_offset;
     uint32_t per_cpu_mfn_nr;
     uint32_t *mfn_list;
-    uint32_t mfn;
+    mfn_t mfn;
     unsigned char *this_page;
 
     barrier(); /* must read buf->prod and buf->cons only once */
@@ -532,7 +534,7 @@ static unsigned char *next_record(const struct t_buf *buf, uint32_t *next,
     per_cpu_mfn_nr = x >> PAGE_SHIFT;
     per_cpu_mfn_offset = t_info->mfn_offset[smp_processor_id()];
     mfn_list = (uint32_t *)t_info;
-    mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr];
+    mfn = _mfn(mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr]);
     this_page = mfn_to_virt(mfn);
     if (per_cpu_mfn_nr + 1 >= opt_tbuf_size)
     {
@@ -541,7 +543,7 @@ static unsigned char *next_record(const struct t_buf *buf, uint32_t *next,
     }
     else
     {
-        mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr + 1];
+        mfn = _mfn(mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr + 1]);
         *next_page = mfn_to_virt(mfn);
     }
     return this_page;
diff --git a/xen/common/xenoprof.c b/xen/common/xenoprof.c
index 8a72e38..02b015f 100644
--- a/xen/common/xenoprof.c
+++ b/xen/common/xenoprof.c
@@ -19,10 +19,6 @@
 #include <xsm/xsm.h>
 #include <xen/hypercall.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 /* Limit amount of pages used for shared buffer (per domain) */
 #define MAX_OPROF_SHARED_PAGES 32
 
diff --git a/xen/drivers/acpi/osl.c b/xen/drivers/acpi/osl.c
index 4c8bb78..ca38565 100644
--- a/xen/drivers/acpi/osl.c
+++ b/xen/drivers/acpi/osl.c
@@ -219,7 +219,7 @@ void *__init acpi_os_alloc_memory(size_t sz)
        void *ptr;
 
        if (system_state == SYS_STATE_early_boot)
-               return mfn_to_virt(mfn_x(alloc_boot_pages(PFN_UP(sz), 1)));
+               return mfn_to_virt(alloc_boot_pages(PFN_UP(sz), 1));
 
        ptr = xmalloc_bytes(sz);
        ASSERT(!ptr || is_xmalloc_memory(ptr));
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index b2f6104..388d353 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -288,8 +288,8 @@ static inline uint64_t gvirt_to_maddr(vaddr_t va, paddr_t *pa,
  * These are overriden in various source files while underscored version
  * remain intact.
  */
-#define virt_to_mfn(va)     __virt_to_mfn(va)
-#define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
+#define virt_to_mfn(va)     _mfn(__virt_to_mfn(va))
+#define mfn_to_virt(mfn)    __mfn_to_virt(mfn_x(mfn))
 
 /* Convert between Xen-heap virtual addresses and page-info structures. */
 static inline struct page_info *virt_to_page(const void *v)
@@ -307,7 +307,7 @@ static inline struct page_info *virt_to_page(const void *v)
 
 static inline void *page_to_virt(const struct page_info *pg)
 {
-    return mfn_to_virt(mfn_x(page_to_mfn(pg)));
+    return mfn_to_virt(page_to_mfn(pg));
 }
 
 struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 1ea173c..bd714d1 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -633,7 +633,7 @@ static inline bool arch_mfn_in_directmap(unsigned long mfn)
 {
     unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
 
-    return mfn <= (virt_to_mfn(eva - 1) + 1);
+    return mfn <= (__virt_to_mfn(eva - 1) + 1);
 }
 
 int arch_acquire_resource(struct domain *d, unsigned int type,
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index c1e9293..74ac64a 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -260,8 +260,8 @@ void copy_page_sse2(void *, const void *);
  * overridden in various source files while underscored versions remain intact.
  */
 #define mfn_valid(mfn)      __mfn_valid(mfn_x(mfn))
-#define virt_to_mfn(va)     __virt_to_mfn(va)
-#define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
+#define virt_to_mfn(va)     _mfn(__virt_to_mfn(va))
+#define mfn_to_virt(mfn)    __mfn_to_virt(mfn_x(mfn))
 #define virt_to_maddr(va)   __virt_to_maddr((unsigned long)(va))
 #define maddr_to_virt(ma)   __maddr_to_virt((unsigned long)(ma))
 #define maddr_to_page(ma)   __maddr_to_page(ma)
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index 32669a3..518d874 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -53,14 +53,14 @@ static inline void *__map_domain_page_global(const struct page_info *pg)
 
 #else /* !CONFIG_DOMAIN_PAGE */
 
-#define map_domain_page(mfn)                __mfn_to_virt(mfn_x(mfn))
+#define map_domain_page(mfn)                mfn_to_virt(mfn)
 #define __map_domain_page(pg)               page_to_virt(pg)
 #define unmap_domain_page(va)               ((void)(va))
-#define domain_page_map_to_mfn(va)          _mfn(virt_to_mfn((unsigned long)(va)))
+#define domain_page_map_to_mfn(va)          virt_to_mfn(va)
 
 static inline void *map_domain_page_global(mfn_t mfn)
 {
-    return mfn_to_virt(mfn_x(mfn));
+    return mfn_to_virt(mfn);
 }
 
 static inline void *__map_domain_page_global(const struct page_info *pg)
-- 
2.1.4

