[Xen-devel] [PATCH 1/5] x86: make get_page_from_mfn() return struct page_info *
Almost all callers of get_page_from_mfn() want the struct page_info pointer, and the function calculates it internally anyway.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
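
For illustration (not part of the patch itself), the conversion turns the old two-step idiom, taking a reference and then translating the MFN a second time, into a single call. A minimal before/after sketch, using placeholder variables:

/* Old idiom: boolean return, then a separate mfn_to_page() translation. */
if ( unlikely(!get_page_from_mfn(mfn, d)) )
    return 0;
page = mfn_to_page(mfn);

/* New idiom: the reference and the pointer come from one call. */
page = get_page_from_mfn(mfn, d);
if ( unlikely(!page) )
    return 0;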
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -704,7 +704,6 @@ get_##level##_linear_pagetable(
level##_pgentry_t pde, unsigned long pde_pfn, struct domain *d) \
{ \
unsigned long x, y; \
- struct page_info *page; \
unsigned long pfn; \
\
if ( !opt_pv_linear_pt ) \
@@ -723,14 +722,15 @@ get_##level##_linear_pagetable(
\
if ( (pfn = level##e_get_pfn(pde)) != pde_pfn ) \
{ \
- struct page_info *ptpg = mfn_to_page(_mfn(pde_pfn)); \
+ struct page_info *page, *ptpg = mfn_to_page(_mfn(pde_pfn)); \
\
/* Make sure the page table belongs to the correct domain. */ \
if ( unlikely(page_get_owner(ptpg) != d) ) \
return 0; \
\
/* Make sure the mapped frame belongs to the correct domain. */ \
- if ( unlikely(!get_page_from_mfn(_mfn(pfn), d)) ) \
+ page = get_page_from_mfn(_mfn(pfn), d); \
+ if ( unlikely(!page) ) \
return 0; \
\
/* \
@@ -740,7 +740,6 @@ get_##level##_linear_pagetable(
* elsewhere. \
* If so, atomically increment the count (checking for overflow). \
*/ \
- page = mfn_to_page(_mfn(pfn)); \
if ( !inc_linear_entries(ptpg) ) \
{ \
put_page(page); \
@@ -3724,7 +3723,8 @@ long do_mmu_update(
xsm_checked = xsm_needed;
}
- if ( unlikely(!get_page_from_mfn(_mfn(mfn), pg_owner)) )
+ page = get_page_from_mfn(_mfn(mfn), pg_owner);
+ if ( unlikely(!page) )
{
gdprintk(XENLOG_WARNING,
"Could not get page for mach->phys update\n");
@@ -3736,7 +3736,7 @@ long do_mmu_update(
paging_mark_dirty(pg_owner, _mfn(mfn));
- put_page(mfn_to_page(_mfn(mfn)));
+ put_page(page);
break;
default:
@@ -3921,10 +3921,10 @@ static int __do_update_va_mapping(
rc = -EINVAL;
pl1e = map_guest_l1e(va, &gl1mfn);
- if ( unlikely(!pl1e || !get_page_from_mfn(gl1mfn, d)) )
+ gl1pg = pl1e ? get_page_from_mfn(gl1mfn, d) : NULL;
+ if ( unlikely(!gl1pg) )
goto out;
- gl1pg = mfn_to_page(gl1mfn);
if ( !page_lock(gl1pg) )
{
put_page(gl1pg);
@@ -4120,10 +4120,10 @@ int xenmem_add_to_physmap_one(
put_gfn(d, gfn);
return -ENOMEM;
}
- if ( !get_page_from_mfn(_mfn(idx), d) )
- break;
mfn = _mfn(idx);
- page = mfn_to_page(mfn);
+ page = get_page_from_mfn(mfn, d);
+ if ( unlikely(!page) )
+ mfn = INVALID_MFN;
break;
}
case XENMAPSPACE_gmfn_foreign:
--- a/xen/arch/x86/pv/grant_table.c
+++ b/xen/arch/x86/pv/grant_table.c
@@ -80,7 +80,8 @@ int create_grant_pv_mapping(uint64_t add
gl1mfn = _mfn(addr >> PAGE_SHIFT);
- if ( !get_page_from_mfn(gl1mfn, currd) )
+ page = get_page_from_mfn(gl1mfn, currd);
+ if ( !page )
goto out;
pl1e = map_domain_page(gl1mfn) + (addr & ~PAGE_MASK);
@@ -101,11 +102,11 @@ int create_grant_pv_mapping(uint64_t add
goto out;
}
- if ( !get_page_from_mfn(gl1mfn, currd) )
+ page = get_page_from_mfn(gl1mfn, currd);
+ if ( !page )
goto out_unmap;
}
- page = mfn_to_page(gl1mfn);
if ( !page_lock(page) )
goto out_put;
@@ -159,10 +160,10 @@ static bool steal_linear_address(unsigne
goto out;
}
- if ( !get_page_from_mfn(gl1mfn, currd) )
+ page = get_page_from_mfn(gl1mfn, currd);
+ if ( !page )
goto out_unmap;
- page = mfn_to_page(gl1mfn);
if ( !page_lock(page) )
goto out_put;
@@ -235,7 +236,8 @@ int replace_grant_pv_mapping(uint64_t ad
gl1mfn = _mfn(addr >> PAGE_SHIFT);
- if ( !get_page_from_mfn(gl1mfn, currd) )
+ page = get_page_from_mfn(gl1mfn, currd);
+ if ( !page )
goto out;
pl1e = map_domain_page(gl1mfn) + (addr & ~PAGE_MASK);
@@ -263,12 +265,11 @@ int replace_grant_pv_mapping(uint64_t ad
if ( !pl1e )
goto out;
- if ( !get_page_from_mfn(gl1mfn, currd) )
+ page = get_page_from_mfn(gl1mfn, currd);
+ if ( !page )
goto out_unmap;
}
- page = mfn_to_page(gl1mfn);
-
if ( !page_lock(page) )
goto out_put;
--- a/xen/arch/x86/pv/ro-page-fault.c
+++ b/xen/arch/x86/pv/ro-page-fault.c
@@ -253,10 +253,10 @@ static int ptwr_do_page_fault(struct x86
struct page_info *page;
int rc;
- if ( !get_page_from_mfn(l1e_get_mfn(pte), current->domain) )
+ page = get_page_from_mfn(l1e_get_mfn(pte), current->domain);
+ if ( !page )
return X86EMUL_UNHANDLEABLE;
- page = l1e_get_page(pte);
if ( !page_lock(page) )
{
put_page(page);
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -382,7 +382,7 @@ int get_page_from_l1e(
l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner);
void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);
-static inline bool get_page_from_mfn(mfn_t mfn, struct domain *d)
+static inline struct page_info *get_page_from_mfn(mfn_t mfn, struct domain *d)
{
struct page_info *page = __mfn_to_page(mfn_x(mfn));
@@ -390,10 +390,10 @@ static inline bool get_page_from_mfn(mfn
{
gdprintk(XENLOG_WARNING,
"Could not get page ref for mfn %"PRI_mfn"\n", mfn_x(mfn));
- return false;
+ return NULL;
}
- return true;
+ return page;
}
static inline void put_page_and_type(struct page_info *page)
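
As a usage note, here is a minimal sketch (not code from this series) of the get/lock/put pattern the PV grant-table and ptwr callers above follow with the new return type; gl1mfn and currd stand in for a caller's MFN and domain:

struct page_info *page = get_page_from_mfn(gl1mfn, currd);

if ( !page )                /* reference could not be taken */
    goto out;

if ( !page_lock(page) )
{
    put_page(page);         /* drop the reference taken above */
    goto out;
}

/* ... operate on the page table ... */

page_unlock(page);
put_page(page);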