[Xen-changelog] [xen staging-4.9] x86/vmx: Improvements to LBR MSR handling
commit db356a429b40e6191e215433872fe1ab481cdeac
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon May 7 11:57:00 2018 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Aug 14 12:33:45 2018 +0100

    x86/vmx: Improvements to LBR MSR handling

    The main purpose of this patch is to only ever insert the LBR MSRs into
    the guest load/save list once, as a future patch wants to change the
    behaviour of vmx_add_guest_msr().

    The repeated processing of lbr_info and the guests MSR load/save list is
    redundant, and a guest using LBR itself will have to re-enable
    MSR_DEBUGCTL.LBR in its #DB handler, meaning that Xen will repeat this
    redundant processing every time the guest gets a debug exception.

    Rename lbr_fixup_enabled to lbr_flags to be a little more generic, and
    use one bit to indicate that the MSRs have been inserted into the
    load/save list.  Shorten the existing FIXUP* identifiers to reduce code
    volume.

    Furthermore, handing the guest #MC on an error isn't a legitimate
    action.  Two of the three failure cases are definitely hypervisor bugs,
    and the third is a boundary case which shouldn't occur in practice.  The
    guest also won't execute correctly, so handle errors by cleanly crashing
    the guest.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    (cherry picked from commit be73a842e642772d7372004c9c105de35b771020)
---
 xen/arch/x86/hvm/vmx/vmx.c         | 82 +++++++++++++++++++++++++++-----------
 xen/include/asm-x86/hvm/vmx/vmcs.h |  2 +-
 2 files changed, 60 insertions(+), 24 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 6e1658cdee..251e6e286f 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2838,8 +2838,10 @@ enum
 
 #define LBR_FROM_SIGNEXT_2MSB  ((1ULL << 59) | (1ULL << 60))
 
-#define FIXUP_LBR_TSX            (1u << 0)
-#define FIXUP_BDW_ERRATUM_BDF14  (1u << 1)
+#define LBR_MSRS_INSERTED      (1u << 0)
+#define LBR_FIXUP_TSX          (1u << 1)
+#define LBR_FIXUP_BDF14        (1u << 2)
+#define LBR_FIXUP_MASK         (LBR_FIXUP_TSX | LBR_FIXUP_BDF14)
 
 static bool __read_mostly lbr_tsx_fixup_needed;
 static bool __read_mostly bdw_erratum_bdf14_fixup_needed;
@@ -3125,7 +3127,6 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         __vmwrite(GUEST_SYSENTER_EIP, msr_content);
         break;
     case MSR_IA32_DEBUGCTLMSR: {
-        int i, rc = 0;
         uint64_t supported = IA32_DEBUGCTLMSR_LBR | IA32_DEBUGCTLMSR_BTF;
 
         if ( boot_cpu_has(X86_FEATURE_RTM) )
@@ -3136,30 +3137,65 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
             if ( vpmu_do_wrmsr(msr, msr_content, supported) )
                 break;
         }
-        if ( msr_content & IA32_DEBUGCTLMSR_LBR )
+
+        /*
+         * When a guest first enables LBR, arrange to save and restore the LBR
+         * MSRs and allow the guest direct access.
+         *
+         * MSR_DEBUGCTL and LBR has existed almost as long as MSRs have
+         * existed, and there is no architectural way to hide the feature, or
+         * fail the attempt to enable LBR.
+         *
+         * Unknown host LBR MSRs or hitting -ENOSPC with the guest load/save
+         * list are definitely hypervisor bugs, whereas -ENOMEM for allocating
+         * the load/save list is simply unlucky (and shouldn't occur with
+         * sensible management by the toolstack).
+         *
+         * Either way, there is nothing we can do right now to recover, and
+         * the guest won't execute correctly either.  Simply crash the domain
+         * to make the failure obvious.
+         */
+        if ( !(v->arch.hvm_vmx.lbr_flags & LBR_MSRS_INSERTED) &&
+             (msr_content & IA32_DEBUGCTLMSR_LBR) )
         {
             const struct lbr_info *lbr = last_branch_msr_get();
-            if ( lbr == NULL )
-                break;
 
-            for ( ; (rc == 0) && lbr->count; lbr++ )
-                for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
-                    if ( (rc = vmx_add_guest_msr(v, lbr->base + i)) == 0 )
+            if ( unlikely(!lbr) )
+            {
+                gprintk(XENLOG_ERR, "Unknown Host LBR MSRs\n");
+                domain_crash(v->domain);
+                return X86EMUL_OKAY;
+            }
+
+            for ( ; lbr->count; lbr++ )
+            {
+                unsigned int i;
+
+                for ( i = 0; i < lbr->count; i++ )
+                {
+                    int rc = vmx_add_guest_msr(v, lbr->base + i);
+
+                    if ( unlikely(rc) )
                     {
-                        vmx_disable_intercept_for_msr(v, lbr->base + i, MSR_TYPE_R | MSR_TYPE_W);
-                        if ( lbr_tsx_fixup_needed )
-                            v->arch.hvm_vmx.lbr_fixup_enabled |= FIXUP_LBR_TSX;
-                        if ( bdw_erratum_bdf14_fixup_needed )
-                            v->arch.hvm_vmx.lbr_fixup_enabled |=
-                                FIXUP_BDW_ERRATUM_BDF14;
+                        gprintk(XENLOG_ERR,
+                                "Guest load/save list error %d\n", rc);
+                        domain_crash(v->domain);
+                        return X86EMUL_OKAY;
                     }
-        }
 
-        if ( rc < 0 )
-            hvm_inject_hw_exception(TRAP_machine_check, X86_EVENT_NO_EC);
-        else
-            __vmwrite(GUEST_IA32_DEBUGCTL, msr_content);
+                    vmx_disable_intercept_for_msr(v, lbr->base + i,
+                                                  MSR_TYPE_R | MSR_TYPE_W);
+                }
+            }
+
+            v->arch.hvm_vmx.lbr_flags |= LBR_MSRS_INSERTED;
+            if ( lbr_tsx_fixup_needed )
+                v->arch.hvm_vmx.lbr_flags |= LBR_FIXUP_TSX;
+            if ( bdw_erratum_bdf14_fixup_needed )
+                v->arch.hvm_vmx.lbr_flags |= LBR_FIXUP_BDF14;
+        }
 
+        __vmwrite(GUEST_IA32_DEBUGCTL, msr_content);
         break;
     }
     case MSR_IA32_FEATURE_CONTROL:
@@ -4273,9 +4309,9 @@ static void lbr_fixup(void)
 {
     struct vcpu *curr = current;
 
-    if ( curr->arch.hvm_vmx.lbr_fixup_enabled & FIXUP_LBR_TSX )
+    if ( curr->arch.hvm_vmx.lbr_flags & LBR_FIXUP_TSX )
         lbr_tsx_fixup();
-    if ( curr->arch.hvm_vmx.lbr_fixup_enabled & FIXUP_BDW_ERRATUM_BDF14 )
+    if ( curr->arch.hvm_vmx.lbr_flags & LBR_FIXUP_BDF14 )
         bdw_erratum_bdf14_fixup();
 }
 
@@ -4335,7 +4371,7 @@ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
     }
 
  out:
-    if ( unlikely(curr->arch.hvm_vmx.lbr_fixup_enabled) )
+    if ( unlikely(curr->arch.hvm_vmx.lbr_flags & LBR_FIXUP_MASK) )
         lbr_fixup();
 
     HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index a87462b9ef..6ed1117505 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -143,7 +143,7 @@ struct arch_vmx_struct {
     /* Are we emulating rather than VMENTERing? */
     uint8_t              vmx_emulate;
 
-    uint8_t              lbr_fixup_enabled;
+    uint8_t              lbr_flags;
 
     /* Bitmask of segments that we can't safely use in virtual 8086 mode */
     uint16_t             vm86_segment_mask;
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.9

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
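For readers following the flag rework, below is a minimal standalone C sketch
(not Xen code; the two boolean variables are hypothetical stand-ins for Xen's
boot-time erratum probes) of the bit layout this patch introduces: one uint8_t
carries the one-shot LBR_MSRS_INSERTED marker alongside the erratum fixup
bits, so the DEBUGCTL write path can skip re-populating the load/save list
while the VM-entry path only tests LBR_FIXUP_MASK.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LBR_MSRS_INSERTED (1u << 0)
#define LBR_FIXUP_TSX     (1u << 1)
#define LBR_FIXUP_BDF14   (1u << 2)
#define LBR_FIXUP_MASK    (LBR_FIXUP_TSX | LBR_FIXUP_BDF14)

int main(void)
{
    /* Hypothetical stand-ins for Xen's boot-time fixup detection. */
    bool lbr_tsx_fixup_needed = true;
    bool bdw_erratum_bdf14_fixup_needed = false;

    uint8_t lbr_flags = 0;    /* per-vCPU byte, formerly lbr_fixup_enabled */

    /* First guest write setting DEBUGCTL.LBR: do the one-time setup. */
    if ( !(lbr_flags & LBR_MSRS_INSERTED) )
    {
        /* ... load/save list insertion and intercept changes go here ... */
        lbr_flags |= LBR_MSRS_INSERTED;
        if ( lbr_tsx_fixup_needed )
            lbr_flags |= LBR_FIXUP_TSX;
        if ( bdw_erratum_bdf14_fixup_needed )
            lbr_flags |= LBR_FIXUP_BDF14;
    }

    /* A later write (e.g. from the guest's #DB handler) does nothing extra. */
    if ( !(lbr_flags & LBR_MSRS_INSERTED) )
        printf("unreachable: list would be populated twice\n");

    /* VM-entry path: only the fixup bits decide whether fixups run. */
    if ( lbr_flags & LBR_FIXUP_MASK )
        printf("fixups would run (flags=%#x)\n", (unsigned int)lbr_flags);

    return 0;
}

Keeping LBR_MSRS_INSERTED outside LBR_FIXUP_MASK is what lets the VM-entry
helper skip lbr_fixup() entirely on hosts that need neither workaround, even
after the MSRs have been inserted into the load/save list.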