[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 19/20] x86/shadow: Alter sh_remove_write_access to take a domain
This allows the removal of an improper use of d->vcpu[0] from toolstack context Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> CC: Jan Beulich <JBeulich@xxxxxxxx> CC: Tim Deegan <tim@xxxxxxx> --- xen/arch/x86/mm/shadow/common.c | 7 +++---- xen/arch/x86/mm/shadow/multi.c | 16 ++++++---------- xen/arch/x86/mm/shadow/private.h | 2 +- 3 files changed, 10 insertions(+), 15 deletions(-) diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c index d24859e..4e6397a 100644 --- a/xen/arch/x86/mm/shadow/common.c +++ b/xen/arch/x86/mm/shadow/common.c @@ -671,7 +671,7 @@ static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn, ftlb |= oos_fixup_flush_gmfn(v, gmfn, fixup); - switch ( sh_remove_write_access(v, gmfn, 0, 0) ) + switch ( sh_remove_write_access(d, gmfn, 0, 0) ) { default: case 0: @@ -2180,7 +2180,7 @@ static inline void trace_shadow_wrmap_bf(mfn_t gmfn) * level==0 means we have some other reason for revoking write access. * If level==0 we are allowed to fail, returning -1. 
*/ -int sh_remove_write_access(struct vcpu *v, mfn_t gmfn, +int sh_remove_write_access(struct domain *d, mfn_t gmfn, unsigned int level, unsigned long fault_addr) { @@ -2212,7 +2212,6 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn, | SHF_L1_64 | SHF_FL1_64 ; - struct domain *d = v->domain; struct page_info *pg = mfn_to_page(gmfn); #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC struct vcpu *curr = current; @@ -3689,7 +3688,7 @@ int shadow_track_dirty_vram(struct domain *d, for ( i = begin_pfn; i < end_pfn; i++ ) { mfn_t mfn = get_gfn_query_unlocked(d, i, &t); if (mfn_x(mfn) != INVALID_MFN) - flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0); + flush_tlb |= sh_remove_write_access(d, mfn, 1, 0); } dirty_vram->last_dirty = -1; } diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c index 288c7d5..16cd60d 100644 --- a/xen/arch/x86/mm/shadow/multi.c +++ b/xen/arch/x86/mm/shadow/multi.c @@ -278,11 +278,7 @@ shadow_check_gl1e(struct vcpu *v, walk_t *gw) static inline uint32_t gw_remove_write_accesses(struct vcpu *v, unsigned long va, walk_t *gw) { -#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */ -#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) struct domain *d = v->domain; -#endif -#endif uint32_t rc = 0; #if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... 
*/ @@ -295,7 +291,7 @@ gw_remove_write_accesses(struct vcpu *v, unsigned long va, walk_t *gw) } else #endif /* OOS */ - if ( sh_remove_write_access(v, gw->l3mfn, 3, va) ) + if ( sh_remove_write_access(d, gw->l3mfn, 3, va) ) rc = GW_RMWR_FLUSHTLB; #endif /* GUEST_PAGING_LEVELS >= 4 */ @@ -307,7 +303,7 @@ gw_remove_write_accesses(struct vcpu *v, unsigned long va, walk_t *gw) } else #endif /* OOS */ - if ( sh_remove_write_access(v, gw->l2mfn, 2, va) ) + if ( sh_remove_write_access(d, gw->l2mfn, 2, va) ) rc |= GW_RMWR_FLUSHTLB; #endif /* GUEST_PAGING_LEVELS >= 3 */ @@ -316,7 +312,7 @@ gw_remove_write_accesses(struct vcpu *v, unsigned long va, walk_t *gw) #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) && !mfn_is_out_of_sync(gw->l1mfn) #endif /* OOS */ - && sh_remove_write_access(v, gw->l1mfn, 1, va) ) + && sh_remove_write_access(d, gw->l1mfn, 1, va) ) rc |= GW_RMWR_FLUSHTLB; return rc; @@ -4028,7 +4024,7 @@ sh_update_cr3(struct vcpu *v, int do_locking) * replace the old shadow pagetable(s), so that we can safely use the * (old) shadow linear maps in the writeable mapping heuristics. 
*/ #if GUEST_PAGING_LEVELS == 2 - if ( sh_remove_write_access(v, gmfn, 2, 0) != 0 ) + if ( sh_remove_write_access(d, gmfn, 2, 0) != 0 ) flush_tlb_mask(d->domain_dirty_cpumask); sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow); #elif GUEST_PAGING_LEVELS == 3 @@ -4048,7 +4044,7 @@ sh_update_cr3(struct vcpu *v, int do_locking) gl2gfn = guest_l3e_get_gfn(gl3e[i]); gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt); if ( p2m_is_ram(p2mt) ) - flush |= sh_remove_write_access(v, gl2mfn, 2, 0); + flush |= sh_remove_write_access(d, gl2mfn, 2, 0); } } if ( flush ) @@ -4072,7 +4068,7 @@ sh_update_cr3(struct vcpu *v, int do_locking) } } #elif GUEST_PAGING_LEVELS == 4 - if ( sh_remove_write_access(v, gmfn, 4, 0) != 0 ) + if ( sh_remove_write_access(d, gmfn, 4, 0) != 0 ) flush_tlb_mask(d->domain_dirty_cpumask); sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow); #else diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h index 96b53b9..1bf1deb 100644 --- a/xen/arch/x86/mm/shadow/private.h +++ b/xen/arch/x86/mm/shadow/private.h @@ -374,7 +374,7 @@ void sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn, * Returns non-zero if we need to flush TLBs. * level and fault_addr desribe how we found this to be a pagetable; * level==0 means we have some other reason for revoking write access. */ -extern int sh_remove_write_access(struct vcpu *v, mfn_t readonly_mfn, +extern int sh_remove_write_access(struct domain *d, mfn_t readonly_mfn, unsigned int level, unsigned long fault_addr); -- 1.7.10.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |