[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 04/20] x86/shadow: Change the gating of shadow heuristics
Each of these functions will have their vcpu parameters replaced with domain parameters because they are part of domain-generic rather than vcpu specific codepaths, which means that the use of 'v' will have to change. 'current' can be used to obtain a vcpu when in an appropriate context. The 'curr->domain == d' test is less restrictive than 'v == current'. The end result is still safe as the code still only runs in the context of the correct domain, but is now valid to run in cases where previously 'v' was some other vcpu in the same domain. Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Reviewed-by: Tim Deegan <tim@xxxxxxx> CC: Jan Beulich <JBeulich@xxxxxxxx> --- xen/arch/x86/mm/shadow/common.c | 21 +++++++++++++-------- xen/arch/x86/mm/shadow/multi.c | 13 +++++++++---- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c index 3b5ef19..26dab30 100644 --- a/xen/arch/x86/mm/shadow/common.c +++ b/xen/arch/x86/mm/shadow/common.c @@ -2170,6 +2170,9 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn, ; struct domain *d = v->domain; struct page_info *pg = mfn_to_page(gmfn); +#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC + struct vcpu *curr = current; +#endif ASSERT(paging_locked_by_me(d)); @@ -2205,7 +2208,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn, } #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC - if ( v == current ) + if ( curr->domain == d ) { unsigned long gfn; /* Heuristic: there is likely to be only one writeable mapping, @@ -2213,7 +2216,8 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn, * in the guest's linear map (on non-HIGHPTE linux and windows)*/ #define GUESS(_a, _h) do { \ - if ( v->arch.paging.mode->shadow.guess_wrmap(v, (_a), gmfn) ) \ + if ( curr->arch.paging.mode->shadow.guess_wrmap( \ + curr, (_a), gmfn) ) \ perfc_incr(shadow_writeable_h_ ## _h); \ if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 ) \ { \ @@ -2222,7 +2226,7 
@@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn, } \ } while (0) - if ( v->arch.paging.mode->guest_levels == 2 ) + if ( curr->arch.paging.mode->guest_levels == 2 ) { if ( level == 1 ) /* 32bit non-PAE w2k3: linear map at 0xC0000000 */ @@ -2237,7 +2241,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn, GUESS(0xBFC00000UL + ((fault_addr & VADDR_MASK) >> 10), 6); } - else if ( v->arch.paging.mode->guest_levels == 3 ) + else if ( curr->arch.paging.mode->guest_levels == 3 ) { /* 32bit PAE w2k3: linear map at 0xC0000000 */ switch ( level ) @@ -2259,7 +2263,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn, + ((fault_addr & VADDR_MASK) >> 18), 6); break; } } - else if ( v->arch.paging.mode->guest_levels == 4 ) + else if ( curr->arch.paging.mode->guest_levels == 4 ) { /* 64bit w2k3: linear map at 0xfffff68000000000 */ switch ( level ) @@ -2312,14 +2316,15 @@ int sh_remove_write_access(struct vcpu *v, mfn_t gmfn, * the writeable mapping by looking at the same MFN where the last * brute-force search succeeded. 
*/ - if ( v->arch.paging.shadow.last_writeable_pte_smfn != 0 ) + if ( (curr->domain == d) && + (curr->arch.paging.shadow.last_writeable_pte_smfn != 0) ) { unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask); - mfn_t last_smfn = _mfn(v->arch.paging.shadow.last_writeable_pte_smfn); + mfn_t last_smfn = _mfn(curr->arch.paging.shadow.last_writeable_pte_smfn); int shtype = mfn_to_page(last_smfn)->u.sh.type; if ( callbacks[shtype] ) - callbacks[shtype](v, last_smfn, gmfn); + callbacks[shtype](curr, last_smfn, gmfn); if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count ) perfc_incr(shadow_writeable_h_5); diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c index b538997..f532bff 100644 --- a/xen/arch/x86/mm/shadow/multi.c +++ b/xen/arch/x86/mm/shadow/multi.c @@ -4185,6 +4185,8 @@ static int sh_page_fault(struct vcpu *v, int sh_rm_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn, mfn_t smfn, unsigned long off) { + struct domain *d = v->domain; + struct vcpu *curr = current; int r; shadow_l1e_t *sl1p, sl1e; struct page_info *sp; @@ -4193,9 +4195,9 @@ int sh_rm_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn, ASSERT(mfn_valid(smfn)); /* Remember if we've been told that this process is being torn down */ - v->arch.paging.shadow.pagetable_dying - = !!(mfn_to_page(gmfn)->shadow_flags & SHF_pagetable_dying); - + if ( curr->domain == d ) + curr->arch.paging.shadow.pagetable_dying + = !!(mfn_to_page(gmfn)->shadow_flags & SHF_pagetable_dying); sp = mfn_to_page(smfn); @@ -4290,6 +4292,8 @@ int sh_rm_write_access_from_l1(struct vcpu *v, mfn_t sl1mfn, int done = 0; int flags; #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC + struct domain *d = v->domain; + struct vcpu *curr = current; mfn_t base_sl1mfn = sl1mfn; /* Because sl1mfn changes in the foreach */ #endif @@ -4304,7 +4308,8 @@ int sh_rm_write_access_from_l1(struct vcpu *v, mfn_t sl1mfn, (void) shadow_set_l1e(v, sl1e, ro_sl1e, p2m_ram_rw, sl1mfn); #if SHADOW_OPTIMIZATIONS & 
SHOPT_WRITABLE_HEURISTIC /* Remember the last shadow that we shot a writeable mapping in */ - v->arch.paging.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn); + if ( curr->domain == d ) + curr->arch.paging.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn); #endif if ( (mfn_to_page(readonly_mfn)->u.inuse.type_info & PGT_count_mask) == 0 ) -- 1.7.10.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |