[Xen-changelog] [xen stable-4.2] x86/xsave: fix nonlazy state handling
commit a22a77f036da315b7cb7cefb4eef191e7cd9a247
Author:     Liu Jinsong <jinsong.liu@xxxxxxxxx>
AuthorDate: Mon Dec 2 15:56:09 2013 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Dec 2 15:56:09 2013 +0100

    x86/xsave: fix nonlazy state handling

    Nonlazy xstates should be xsaved every time vcpu_save_fpu() runs.
    Operations on nonlazy xstates do not trigger a #NM exception, so this
    state must be restored whenever the vcpu is scheduled in and saved
    whenever it is scheduled out, regardless of whether the lazy FPU
    state is dirty.

    Currently this bug affects the AMD LWP feature, and later the Intel
    MPX feature; with the fix both LWP and MPX work correctly.

    Signed-off-by: Liu Jinsong <jinsong.liu@xxxxxxxxx>

    Furthermore, during restore we also need to set nonlazy_xstate_used
    according to the incoming accumulated XCR0.

    Also adjust the changes to i387.c such that there won't be a
    pointless clts()/stts() pair.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    master commit: 7d8b5dd98463524686bdee8b973b53c00c232122
    master date: 2013-11-25 11:19:04 +0100
---
 xen/arch/x86/domctl.c  |  2 ++
 xen/arch/x86/hvm/hvm.c |  2 ++
 xen/arch/x86/i387.c    | 15 +++++++++++++--
 3 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index d5a06fc..0592b6e 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1503,6 +1503,8 @@ long arch_do_domctl(
         {
             v->arch.xcr0 = _xcr0;
             v->arch.xcr0_accum = _xcr0_accum;
+            if ( _xcr0_accum & XSTATE_NONLAZY )
+                v->arch.nonlazy_xstate_used = 1;
             memcpy(v->arch.xsave_area, _xsave_area,
                    evc->size - 2 * sizeof(uint64_t));
         }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6ab812d..3cfce05 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1014,6 +1014,8 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
 
     v->arch.xcr0 = ctxt->xcr0;
     v->arch.xcr0_accum = ctxt->xcr0_accum;
+    if ( ctxt->xcr0_accum & XSTATE_NONLAZY )
+        v->arch.nonlazy_xstate_used = 1;
     memcpy(v->arch.xsave_area, &ctxt->save_area,
            desc->length - offsetof(struct hvm_hw_cpu_xsave, save_area));
 
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 7c1feb6..0a001fd 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -122,11 +122,22 @@ static inline void fpu_frstor(struct vcpu *v)
 /*******************************/
 /*      FPU Save Functions     */
 /*******************************/
+
+static inline uint64_t vcpu_xsave_mask(const struct vcpu *v)
+{
+    if ( v->fpu_dirtied )
+        return v->arch.nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY;
+
+    return v->arch.nonlazy_xstate_used ? XSTATE_NONLAZY : 0;
+}
+
 /* Save x87 extended state */
 static inline void fpu_xsave(struct vcpu *v)
 {
     bool_t ok;
+    uint64_t mask = vcpu_xsave_mask(v);
 
+    ASSERT(mask);
     ASSERT(v->arch.xsave_area);
     /*
      * XCR0 normally represents what guest OS set. In case of Xen itself,
@@ -134,7 +145,7 @@ static inline void fpu_xsave(struct vcpu *v)
      */
     ok = set_xcr0(v->arch.xcr0_accum | XSTATE_FP_SSE);
     ASSERT(ok);
-    xsave(v, v->arch.nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY);
+    xsave(v, mask);
     ok = set_xcr0(v->arch.xcr0 ?: XSTATE_FP_SSE);
     ASSERT(ok);
 }
@@ -267,7 +278,7 @@ void vcpu_restore_fpu_lazy(struct vcpu *v)
  */
 void vcpu_save_fpu(struct vcpu *v)
 {
-    if ( !v->fpu_dirtied )
+    if ( !v->fpu_dirtied && !v->arch.nonlazy_xstate_used )
         return;
 
     ASSERT(!is_idle_vcpu(v));
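To see the save-mask logic the patch introduces in isolation, here is a
minimal standalone C sketch of vcpu_xsave_mask(). The XSTATE_* constants
and the vcpu_sketch struct are simplified stand-ins, not the real Xen
definitions (Xen derives XSTATE_LAZY/XSTATE_NONLAZY from the actual
component bits):

/* Build: cc -Wall sketch.c -o sketch */
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; real Xen defines these differently. */
#define XSTATE_LAZY     0x07ULL                 /* x87/SSE/AVX: covered by #NM */
#define XSTATE_NONLAZY  0x40000000ULL           /* e.g. AMD LWP: no #NM on use */
#define XSTATE_ALL      (XSTATE_LAZY | XSTATE_NONLAZY)

struct vcpu_sketch {
    int fpu_dirtied;            /* lazy state touched since last #NM trap    */
    int nonlazy_xstate_used;    /* xcr0_accum ever included a nonlazy bit    */
};

/* Mirrors the new vcpu_xsave_mask(): which components must be xsaved now. */
static uint64_t vcpu_xsave_mask(const struct vcpu_sketch *v)
{
    if (v->fpu_dirtied)
        return v->nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY;

    /* No #NM fired, but nonlazy state (LWP/MPX) may still have changed. */
    return v->nonlazy_xstate_used ? XSTATE_NONLAZY : 0;
}

int main(void)
{
    struct vcpu_sketch cases[] = { { 1, 1 }, { 1, 0 }, { 0, 1 }, { 0, 0 } };

    for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
        printf("fpu_dirtied=%d nonlazy_used=%d -> mask=%#llx\n",
               cases[i].fpu_dirtied, cases[i].nonlazy_xstate_used,
               (unsigned long long)vcpu_xsave_mask(&cases[i]));
    return 0;
}

The third case shows the property the patch establishes: even with
fpu_dirtied clear (no #NM fired), a vcpu that has used nonlazy state
yields a non-zero mask, so vcpu_save_fpu() no longer returns early and
LWP/MPX state is saved on every deschedule.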
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.2