[PATCH 2/2] x86/mm: remove some indirection from {paging,sh}_cmpxchg_guest_entry()
Make the functions more similar to cmpxchg() in that they now take an
integral "old" input and return the value read.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -398,8 +398,8 @@ int shadow_write_p2m_entry(struct p2m_do
 /* Functions that atomically write PV guest PT entries */
 void sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new,
                           mfn_t gmfn);
-void sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old,
-                            intpte_t new, mfn_t gmfn);
+intpte_t sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t old,
+                                intpte_t new, mfn_t gmfn);
 
 /* Update all the things that are derived from the guest's CR0/CR3/CR4.
  * Called to initialize paging structures if the paging mode
--- a/xen/arch/x86/mm/shadow/pv.c
+++ b/xen/arch/x86/mm/shadow/pv.c
@@ -39,22 +39,22 @@ sh_write_guest_entry(struct vcpu *v, int
 
 /*
  * Cmpxchg a new value into the guest pagetable, and update the shadows
- * appropriately.
- * N.B. caller should check the value of "old" to see if the cmpxchg itself
- * was successful.
+ * appropriately. Returns the previous entry found, which the caller is
+ * expected to check to see if the cmpxchg was successful.
  */
-void
-sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old,
+intpte_t
+sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t old,
                        intpte_t new, mfn_t gmfn)
 {
     intpte_t t;
 
     paging_lock(v->domain);
-    t = cmpxchg(p, *old, new);
-    if ( t == *old )
+    t = cmpxchg(p, old, new);
+    if ( t == old )
         sh_validate_guest_entry(v, gmfn, p, sizeof(new));
-    *old = t;
     paging_unlock(v->domain);
+
+    return t;
 }
 
 /*
--- a/xen/arch/x86/pv/mm.h
+++ b/xen/arch/x86/pv/mm.h
@@ -47,16 +47,14 @@ static inline bool update_intpte(intpte_
     else
 #endif
     {
-        intpte_t t = old;
-
         for ( ; ; )
         {
-            intpte_t _new = new;
+            intpte_t _new = new, t;
 
             if ( preserve_ad )
                 _new |= old & (_PAGE_ACCESSED | _PAGE_DIRTY);
 
-            paging_cmpxchg_guest_entry(v, p, &t, _new, mfn);
+            t = paging_cmpxchg_guest_entry(v, p, old, _new, mfn);
 
             if ( t == old )
                 break;
--- a/xen/arch/x86/pv/ro-page-fault.c
+++ b/xen/arch/x86/pv/ro-page-fault.c
@@ -168,8 +168,8 @@ static int ptwr_emulated_update(unsigned
     if ( p_old )
     {
         ol1e = l1e_from_intpte(old);
-        paging_cmpxchg_guest_entry(v, &l1e_get_intpte(*pl1e), &old,
-                                   l1e_get_intpte(nl1e), mfn);
+        old = paging_cmpxchg_guest_entry(v, &l1e_get_intpte(*pl1e), old,
+                                         l1e_get_intpte(nl1e), mfn);
         if ( l1e_get_intpte(ol1e) == old )
             ret = X86EMUL_OKAY;
         else
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -98,8 +98,8 @@ struct shadow_paging_mode {
 #ifdef CONFIG_PV
     void          (*write_guest_entry     )(struct vcpu *v, intpte_t *p,
                                             intpte_t new, mfn_t gmfn);
-    void          (*cmpxchg_guest_entry   )(struct vcpu *v, intpte_t *p,
-                                            intpte_t *old, intpte_t new,
+    intpte_t      (*cmpxchg_guest_entry   )(struct vcpu *v, intpte_t *p,
+                                            intpte_t old, intpte_t new,
                                             mfn_t gmfn);
 #endif
 #ifdef CONFIG_HVM
@@ -342,16 +342,15 @@ static inline void paging_write_guest_en
  * true if not. N.B. caller should check the value of "old" to see if the
  * cmpxchg itself was successful.
  */
-static inline void paging_cmpxchg_guest_entry(
-    struct vcpu *v, intpte_t *p, intpte_t *old, intpte_t new, mfn_t gmfn)
+static inline intpte_t paging_cmpxchg_guest_entry(
+    struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn)
 {
 #ifdef CONFIG_SHADOW_PAGING
     if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
-        paging_get_hostmode(v)->shadow.cmpxchg_guest_entry(v, p, old,
-                                                           new, gmfn);
-    else
+        return paging_get_hostmode(v)->shadow.cmpxchg_guest_entry(v, p, old,
+                                                                   new, gmfn);
 #endif
-    *old = cmpxchg(p, *old, new);
+    return cmpxchg(p, old, new);
 }
 
 #endif /* CONFIG_PV */
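
For illustration only (not part of the patch): a minimal standalone sketch of
the cmpxchg()-style convention the patch switches to. It uses a hypothetical
demo_cmpxchg_entry() helper and GCC's __atomic_compare_exchange_n() in place of
Xen's cmpxchg(); the point is just that the caller passes the expected "old"
value and compares it against the returned previous value, as update_intpte()
and ptwr_emulated_update() now do.

/* Illustrative sketch only -- hypothetical helper, not Xen code. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t intpte_t;

/*
 * Stand-in for the new {paging,sh}_cmpxchg_guest_entry() shape: take an
 * integral "old" and return the value actually read, like cmpxchg().
 */
static intpte_t demo_cmpxchg_entry(intpte_t *p, intpte_t old, intpte_t new)
{
    intpte_t t = old;

    /* On failure t is updated to the current *p; on success it stays "old". */
    __atomic_compare_exchange_n(p, &t, new, false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

    return t;
}

int main(void)
{
    intpte_t pte = 0x1003, old = 0x1003;

    /* Caller-side pattern after the patch: compare return value with "old". */
    intpte_t t = demo_cmpxchg_entry(&pte, old, 0x2003);

    printf("cmpxchg %s, entry now %#" PRIx64 "\n",
           t == old ? "succeeded" : "failed", pte);

    return 0;
}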