[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [for-4.11][PATCH v7 15/16] xen/x86: Switch mfn_to_page in x86_64/mm.c to use typesafe MFN
Other than MFN 0 -> INVALID_MFN, no functional change intended. Signed-off-by: Julien Grall <julien.grall@xxxxxxx> Acked-by: Jan Beulich <jbeulich@xxxxxxxx> --- Cc: Jan Beulich <jbeulich@xxxxxxxx> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Changes in v6: - s/_mfn(0)/MFN 0/ - Fix typo in the commit message Changes in v5: - Use INVALID_MFN instead of _mfn(0) Changes in v4: - Patch added --- xen/arch/x86/x86_64/mm.c | 46 ++++++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c index f6dd95aa47..8d1f130abf 100644 --- a/xen/arch/x86/x86_64/mm.c +++ b/xen/arch/x86/x86_64/mm.c @@ -43,6 +43,8 @@ asm(".file \"" __FILE__ "\""); /* Override macros from asm/page.h to make them work with mfn_t */ #undef page_to_mfn #define page_to_mfn(pg) _mfn(__page_to_mfn(pg)) +#undef mfn_to_page +#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn)) unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START; @@ -160,7 +162,8 @@ static int m2p_mapped(unsigned long spfn) static int share_hotadd_m2p_table(struct mem_hotadd_info *info) { - unsigned long i, n, v, m2p_start_mfn = 0; + unsigned long i, n, v; + mfn_t m2p_start_mfn = INVALID_MFN; l3_pgentry_t l3e; l2_pgentry_t l2e; @@ -180,15 +183,16 @@ static int share_hotadd_m2p_table(struct mem_hotadd_info *info) l2e = l3e_to_l2e(l3e)[l2_table_offset(v)]; if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ) continue; - m2p_start_mfn = l2e_get_pfn(l2e); + m2p_start_mfn = l2e_get_mfn(l2e); } else continue; for ( i = 0; i < n; i++ ) { - struct page_info *page = mfn_to_page(m2p_start_mfn + i); - if (hotadd_mem_valid(m2p_start_mfn + i, info)) + struct page_info *page = mfn_to_page(mfn_add(m2p_start_mfn, i)); + + if ( hotadd_mem_valid(mfn_x(mfn_add(m2p_start_mfn, i)), info) ) share_xen_page_with_privileged_guests(page, SHARE_ro); } } @@ -204,12 +208,13 @@ static int share_hotadd_m2p_table(struct mem_hotadd_info *info) l2e = 
l3e_to_l2e(l3e)[l2_table_offset(v)]; if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ) continue; - m2p_start_mfn = l2e_get_pfn(l2e); + m2p_start_mfn = l2e_get_mfn(l2e); for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ ) { - struct page_info *page = mfn_to_page(m2p_start_mfn + i); - if (hotadd_mem_valid(m2p_start_mfn + i, info)) + struct page_info *page = mfn_to_page(mfn_add(m2p_start_mfn, i)); + + if ( hotadd_mem_valid(mfn_x(mfn_add(m2p_start_mfn, i)), info) ) share_xen_page_with_privileged_guests(page, SHARE_ro); } } @@ -720,10 +725,10 @@ static void cleanup_frame_table(struct mem_hotadd_info *info) unsigned long sva, eva; l3_pgentry_t l3e; l2_pgentry_t l2e; - unsigned long spfn, epfn; + mfn_t spfn, epfn; - spfn = info->spfn; - epfn = info->epfn; + spfn = _mfn(info->spfn); + epfn = _mfn(info->epfn); sva = (unsigned long)mfn_to_page(spfn); eva = (unsigned long)mfn_to_page(epfn); @@ -795,16 +800,17 @@ static int setup_frametable_chunk(void *start, void *end, static int extend_frame_table(struct mem_hotadd_info *info) { - unsigned long cidx, nidx, eidx, spfn, epfn; + unsigned long cidx, nidx, eidx; + mfn_t spfn, epfn; - spfn = info->spfn; - epfn = info->epfn; + spfn = _mfn(info->spfn); + epfn = _mfn(info->epfn); - eidx = (pfn_to_pdx(epfn) + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT; - nidx = cidx = pfn_to_pdx(spfn)/PDX_GROUP_COUNT; + eidx = (mfn_to_pdx(epfn) + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT; + nidx = cidx = mfn_to_pdx(spfn)/PDX_GROUP_COUNT; - ASSERT( pfn_to_pdx(epfn) <= (DIRECTMAP_SIZE >> PAGE_SHIFT) && - pfn_to_pdx(epfn) <= FRAMETABLE_NR ); + ASSERT( mfn_to_pdx(epfn) <= (DIRECTMAP_SIZE >> PAGE_SHIFT) && + mfn_to_pdx(epfn) <= FRAMETABLE_NR ); if ( test_bit(cidx, pdx_group_valid) ) cidx = find_next_zero_bit(pdx_group_valid, eidx, cidx); @@ -866,7 +872,7 @@ void __init subarch_init_memory(void) for ( i = 0; i < n; i++ ) share_xen_page_with_privileged_guests( - mfn_to_page(m2p_start_mfn + i), SHARE_ro); + mfn_to_page(_mfn(m2p_start_mfn + i)), SHARE_ro); } for ( v = 
RDWR_COMPAT_MPT_VIRT_START; @@ -884,7 +890,7 @@ void __init subarch_init_memory(void) for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ ) share_xen_page_with_privileged_guests( - mfn_to_page(m2p_start_mfn + i), SHARE_ro); + mfn_to_page(_mfn(m2p_start_mfn + i)), SHARE_ro); } /* Mark all of direct map NX if hardware supports it. */ @@ -1270,7 +1276,7 @@ static int transfer_pages_to_heap(struct mem_hotadd_info *info) */ for (i = info->spfn; i < info->cur; i++) { - pg = mfn_to_page(i); + pg = mfn_to_page(_mfn(i)); pg->count_info = PGC_state_inuse; } -- 2.11.0 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support. |