[Xen-devel] [PATCH v4 6/7] x86/mm: drop p2mt parameter from map_domain_gfn()
No caller actually consumes it.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v4: New.

--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -278,7 +278,6 @@ static int __init pvh_add_mem_range(stru
 
 static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
 {
-    p2m_type_t p2mt;
     uint32_t rc, *ident_pt;
     mfn_t mfn;
     paddr_t gaddr;
@@ -317,7 +316,7 @@ static int __init pvh_setup_vmx_realmode
      * superpages.
      */
     ident_pt = map_domain_gfn(p2m_get_hostp2m(d), _gfn(PFN_DOWN(gaddr)),
-                              &mfn, &p2mt, 0, &rc);
+                              &mfn, 0, &rc);
     if ( ident_pt == NULL )
     {
         printk("Unable to map identity page tables\n");
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -86,7 +86,6 @@ guest_walk_tables(const struct vcpu *v,
                   gfn_t top_gfn, mfn_t top_mfn, void *top_map)
 {
     struct domain *d = v->domain;
-    p2m_type_t p2mt;
     guest_l1e_t *l1p = NULL;
     guest_l2e_t *l2p = NULL;
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
@@ -165,7 +164,6 @@ guest_walk_tables(const struct vcpu *v,
     l3p = map_domain_gfn(p2m,
                          guest_l4e_get_gfn(gw->l4e),
                          &gw->l3mfn,
-                         &p2mt,
                          qt,
                          &rc);
     if ( l3p == NULL )
@@ -257,7 +255,6 @@ guest_walk_tables(const struct vcpu *v,
     l2p = map_domain_gfn(p2m,
                          guest_l3e_get_gfn(gw->l3e),
                          &gw->l2mfn,
-                         &p2mt,
                          qt,
                          &rc);
     if ( l2p == NULL )
@@ -357,7 +354,6 @@ guest_walk_tables(const struct vcpu *v,
     l1p = map_domain_gfn(p2m,
                          guest_l2e_get_gfn(gw->l2e),
                          &gw->l1mfn,
-                         &p2mt,
                          qt,
                          &rc);
     if ( l1p == NULL )
--- a/xen/arch/x86/mm/hap/nested_ept.c
+++ b/xen/arch/x86/mm/hap/nested_ept.c
@@ -151,7 +151,6 @@ static uint32_t
 nept_walk_tables(struct vcpu *v, unsigned long l2ga, ept_walk_t *gw)
 {
     int lvl;
-    p2m_type_t p2mt;
     uint32_t rc = 0, ret = 0, gflags;
     struct domain *d = v->domain;
     struct p2m_domain *p2m = d->arch.p2m;
@@ -163,7 +162,7 @@ nept_walk_tables(struct vcpu *v, unsigne
 
     for (lvl = 4; lvl > 0; lvl--)
     {
-        lxp = map_domain_gfn(p2m, base_gfn, &lxmfn, &p2mt, P2M_ALLOC, &rc);
+        lxp = map_domain_gfn(p2m, base_gfn, &lxmfn, P2M_ALLOC, &rc);
         if ( !lxp )
             goto map_err;
         gw->lxe[lvl] = lxp[ept_lvl_table_offset(l2ga, lvl)];
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2214,8 +2214,9 @@ unsigned long paging_gva_to_gfn(struct v
  * synthetic/structure PFEC_* bits.
  */
 void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
-                     p2m_type_t *p2mt, p2m_query_t q, uint32_t *pfec)
+                     p2m_query_t q, uint32_t *pfec)
 {
+    p2m_type_t p2mt;
     struct page_info *page;
 
     if ( !gfn_valid(p2m->domain, gfn) )
@@ -2225,8 +2226,8 @@ void *map_domain_gfn(struct p2m_domain *
     }
 
     /* Translate the gfn, unsharing if shared. */
-    page = p2m_get_page_from_gfn(p2m, gfn, p2mt, NULL, q);
-    if ( p2m_is_paging(*p2mt) )
+    page = p2m_get_page_from_gfn(p2m, gfn, &p2mt, NULL, q);
+    if ( p2m_is_paging(p2mt) )
     {
         ASSERT(p2m_is_hostp2m(p2m));
         if ( page )
@@ -2235,7 +2236,7 @@ void *map_domain_gfn(struct p2m_domain *
         *pfec = PFEC_page_paged;
         return NULL;
     }
-    if ( p2m_is_shared(*p2mt) )
+    if ( p2m_is_shared(p2mt) )
     {
         if ( page )
             put_page(page);
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -762,7 +762,7 @@ int __must_check p2m_set_entry(struct p2
 extern void p2m_pt_init(struct p2m_domain *p2m);
 
 void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
-                     p2m_type_t *p2mt, p2m_query_t q, uint32_t *pfec);
+                     p2m_query_t q, uint32_t *pfec);
 
 /* Debugging and auditing of the P2M code? */
 #ifndef NDEBUG
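
For illustration only, not part of the patch: a minimal sketch of what a caller of the narrowed interface looks like after this change. The helper name example_read_first_word() and the chosen includes are assumptions made up for this sketch; the unmap_domain_page()/put_page() cleanup is modelled on what the existing callers touched above already do.

#include <xen/domain_page.h>
#include <xen/mm.h>
#include <asm/p2m.h>

/* Hypothetical caller: map a guest frame, read one word, clean up. */
static uint32_t example_read_first_word(struct domain *d, gfn_t gfn,
                                        uint32_t *out)
{
    mfn_t mfn;
    uint32_t pfec = 0;
    uint32_t *ptr = map_domain_gfn(p2m_get_hostp2m(d), gfn, &mfn,
                                   P2M_ALLOC, &pfec);

    if ( ptr == NULL )
        return pfec;                 /* PFEC_* bits describe the failure */

    *out = ptr[0];                   /* no throw-away p2m_type_t needed */

    unmap_domain_page(ptr);
    put_page(mfn_to_page(mfn));      /* drop the ref the mapping took */

    return 0;
}

The point of the sketch: with the p2mt out-parameter gone, callers that only want the mapping and the MFN no longer have to declare a dummy p2m_type_t variable, which is exactly what the hunks above remove.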