[Xen-devel] [PATCH v2 2/2] x86/P2M: p2m_change_type() should pass on error from p2m_set_entry()
Modify the function's name to help eventual backports involving this
function, and in one case where this is trivially possible also stop
ignoring its return value.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Split out from the previous patch to follow Tim's request to have
    the function return an error indication rather than the old type,
    in turn making it desirable to rename it so that eventual backport
    oversights can be avoided.

--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1334,9 +1334,10 @@ long arch_do_domctl(
         unsigned long pfn = domctl->u.set_broken_page_p2m.pfn;
         mfn_t mfn = get_gfn_query(d, pfn, &pt);

-        if ( unlikely(!mfn_valid(mfn_x(mfn)) || !p2m_is_ram(pt) ||
-                      (p2m_change_type(d, pfn, pt, p2m_ram_broken) != pt)) )
+        if ( unlikely(!mfn_valid(mfn_x(mfn))) || unlikely(!p2m_is_ram(pt)) )
             ret = -EINVAL;
+        else
+            ret = p2m_change_type_one(d, pfn, pt, p2m_ram_broken);

         put_gfn(d, pfn);
     }
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1698,7 +1698,7 @@ int hvm_hap_nested_page_fault(paddr_t gp
         if ( access_w )
         {
             paging_mark_dirty(v->domain, mfn_x(mfn));
-            p2m_change_type(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
+            p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
         }
         rc = 1;
         goto out_put_gfn;
@@ -4540,7 +4540,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
         while ( a.nr > start_iter )
         {
             unsigned long pfn = a.first_pfn + start_iter;
-            p2m_type_t t, nt;
+            p2m_type_t t;

             get_gfn_unshare(d, pfn, &t);
             if ( p2m_is_paging(t) )
@@ -4563,16 +4563,10 @@ long do_hvm_op(unsigned long op, XEN_GUE
                 goto param_fail4;
             }

-            nt = p2m_change_type(d, pfn, t, memtype[a.hvmmem_type]);
-            if ( nt != t )
-            {
-                put_gfn(d, pfn);
-                printk(XENLOG_G_WARNING
-                       "d%d: GFN %#lx type changed from %d to %d while trying to change it to %d\n",
-                       d->domain_id, pfn, t, nt, memtype[a.hvmmem_type]);
-                goto param_fail4;
-            }
+            rc = p2m_change_type_one(d, pfn, t, memtype[a.hvmmem_type]);
             put_gfn(d, pfn);
+            if ( rc )
+                goto param_fail4;

             /* Check for continuation if it's not the last interation */
             if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -913,7 +913,7 @@ int mem_sharing_nominate_page(struct dom
     }

     /* Change the p2m type, should never fail with p2m locked. */
-    BUG_ON(p2m_change_type(d, gfn, p2mt, p2m_ram_shared) != p2mt);
+    BUG_ON(p2m_change_type_one(d, gfn, p2mt, p2m_ram_shared));

     /* Account for this page. */
     atomic_inc(&nr_shared_mfns);
@@ -1236,8 +1236,7 @@ int __mem_sharing_unshare_page(struct do
     put_page_and_type(old_page);

 private_page_found:
-    if ( p2m_change_type(d, gfn, p2m_ram_shared, p2m_ram_rw) !=
-         p2m_ram_shared )
+    if ( p2m_change_type_one(d, gfn, p2m_ram_shared, p2m_ram_rw) )
     {
         gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n",
                  d->domain_id, gfn);
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -704,27 +704,33 @@ out:
 }

-/* Modify the p2m type of a single gfn from ot to nt, returning the
- * entry's previous type.  Resets the access permissions. */
-p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
-                           p2m_type_t ot, p2m_type_t nt)
+/*
+ * Modify the p2m type of a single gfn from ot to nt.
+ * Returns: 0 for success, -errno for failure.
+ * Resets the access permissions.
+ */
+int p2m_change_type_one(struct domain *d, unsigned long gfn,
+                        p2m_type_t ot, p2m_type_t nt)
 {
     p2m_access_t a;
     p2m_type_t pt;
     mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    int rc;

     BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));

     gfn_lock(p2m, gfn, 0);

     mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL);
-    if ( pt == ot )
-        p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
+    rc = likely(pt == ot)
+         ? p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt,
+                         p2m->default_access)
+         : -EBUSY;

     gfn_unlock(p2m, gfn, 0);

-    return pt;
+    return rc;
 }

 /* Modify the p2m type of a range of gfns from ot to nt. */
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -469,12 +469,8 @@ void paging_log_dirty_range(struct domai
     p2m_lock(p2m);

     for ( i = 0, pfn = begin_pfn; pfn < begin_pfn + nr; i++, pfn++ )
-    {
-        p2m_type_t pt;
-        pt = p2m_change_type(d, pfn, p2m_ram_rw, p2m_ram_logdirty);
-        if ( pt == p2m_ram_rw )
+        if ( !p2m_change_type_one(d, pfn, p2m_ram_rw, p2m_ram_logdirty) )
             dirty_bitmap[i >> 3] |= (1 << (i & 7));
-    }

     p2m_unlock(p2m);
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -444,8 +444,7 @@ int unmmap_broken_page(struct domain *d,
     if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES)
     {
         ASSERT(mfn_x(r_mfn) == mfn_x(mfn));
-        p2m_change_type(d, gfn, pt, p2m_ram_broken);
-        rc = 0;
+        rc = p2m_change_type_one(d, gfn, pt, p2m_ram_broken);
     }

     put_gfn(d, gfn);
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -823,7 +823,7 @@ int guest_iommu_set_base(struct domain *
         unsigned long gfn = base + i;

         get_gfn_query(d, gfn, &t);
-        p2m_change_type(d, gfn, t, p2m_mmio_dm);
+        p2m_change_type_one(d, gfn, t, p2m_mmio_dm);
         put_gfn(d, gfn);
     }
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -519,8 +519,8 @@ void p2m_change_type_range(struct domain
                            p2m_type_t ot, p2m_type_t nt);

 /* Compare-exchange the type of a single p2m entry */
-p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
-                           p2m_type_t ot, p2m_type_t nt);
+int p2m_change_type_one(struct domain *d, unsigned long gfn,
+                        p2m_type_t ot, p2m_type_t nt);

 /* Report a change affecting memory types. */
 void p2m_memory_type_changed(struct domain *d);
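For reference, a minimal stand-alone sketch (not part of the patch) of the
caller-side pattern this change enables. The stub p2m_change_type_one()
below, its simplified signature, and the single global "entry" are invented
for illustration only; what it mirrors from the patch is the compare-exchange
behaviour and the 0-on-success / -errno-on-failure contract:

    /*
     * Illustrative sketch only.  A stub models the new contract of
     * p2m_change_type_one() (0 on success, -errno on failure) so the
     * caller-side pattern can be seen in isolation.
     */
    #include <errno.h>
    #include <stdio.h>

    typedef enum {
        p2m_ram_rw,
        p2m_ram_logdirty,
        p2m_ram_broken,
    } p2m_type_t;

    static p2m_type_t entry_type = p2m_ram_rw; /* stands in for one p2m entry */

    /* Stub with the patched function's semantics (simplified signature). */
    static int p2m_change_type_one(p2m_type_t ot, p2m_type_t nt)
    {
        if ( entry_type != ot )
            return -EBUSY;  /* current type doesn't match the expected one */
        entry_type = nt;    /* the real code calls p2m_set_entry() here */
        return 0;
    }

    int main(void)
    {
        /* New-style caller: check the error code instead of comparing the
         * returned old type, and simply pass failures up. */
        int rc = p2m_change_type_one(p2m_ram_rw, p2m_ram_logdirty);

        if ( rc )
            fprintf(stderr, "type change failed: %d\n", rc);
        else
            printf("entry is now log-dirty (type %d)\n", entry_type);

        /* A stale expected type now fails cleanly with -EBUSY. */
        rc = p2m_change_type_one(p2m_ram_rw, p2m_ram_broken);
        printf("stale compare-exchange returned %d\n", rc);
        return rc == -EBUSY ? 0 : 1;
    }

Compared to returning the old type, propagating an error code lets callers
distinguish a lost compare-exchange race (-EBUSY) from a p2m_set_entry()
failure, and pass either straight up the stack.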
Attachment: x86-p2m-change-type-check-set-entry.patch