[Xen-changelog] [xen stable-4.9] x86/vmx: Fix vmentry failure because of invalid LER on Broadwell
commit 1b0029cf6d07f2bd49ac57022274c903472af820
Author:     Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
AuthorDate: Wed Dec 20 15:49:23 2017 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Dec 20 15:49:23 2017 +0100

    x86/vmx: Fix vmentry failure because of invalid LER on Broadwell

    Occasionally, on certain Broadwell CPUs MSR_IA32_LASTINTTOIP has been
    observed to have the top three bits corrupted as though the MSR is
    using the LBR_FORMAT_EIP_FLAGS_TSX format. This is incorrect and
    causes a vmentry failure -- the MSR should contain an offset into the
    current code segment. This is assumed to be erratum BDF14. Work
    around the issue by sign-extending into bits 48:63 for
    MSR_IA32_LASTINT{FROM,TO}IP.

    Signed-off-by: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
    master commit: 20f1976b44199d1e7a15fe5d2c8c1a4375b74997
    master date: 2017-06-07 12:00:41 +0100
---
 xen/arch/x86/hvm/vmx/vmx.c         | 62 +++++++++++++++++++++++++++++++++++---
 xen/include/asm-x86/hvm/vmx/vmcs.h |  2 +-
 xen/include/asm-x86/x86_64/page.h  |  3 ++
 3 files changed, 62 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index bcbb746..1fe6fde 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2433,6 +2433,7 @@ static void pi_notification_interrupt(struct cpu_user_regs *regs)
 }
 
 static void __init lbr_tsx_fixup_check(void);
+static void __init bdw_erratum_bdf14_fixup_check(void);
 
 const struct hvm_function_table * __init start_vmx(void)
 {
@@ -2498,6 +2499,7 @@ const struct hvm_function_table * __init start_vmx(void)
     setup_vmcs_dump();
 
     lbr_tsx_fixup_check();
+    bdw_erratum_bdf14_fixup_check();
 
     return &vmx_function_table;
 }
@@ -2790,7 +2792,11 @@ enum
 
 #define LBR_FROM_SIGNEXT_2MSB  ((1ULL << 59) | (1ULL << 60))
 
+#define FIXUP_LBR_TSX            (1u << 0)
+#define FIXUP_BDW_ERRATUM_BDF14  (1u << 1)
+
 static bool __read_mostly lbr_tsx_fixup_needed;
+static bool __read_mostly bdw_erratum_bdf14_fixup_needed;
 static uint32_t __read_mostly lbr_from_start;
 static uint32_t __read_mostly lbr_from_end;
 static uint32_t __read_mostly lbr_lastint_from;
@@ -2827,6 +2833,13 @@ static void __init lbr_tsx_fixup_check(void)
     }
 }
 
+static void __init bdw_erratum_bdf14_fixup_check(void)
+{
+    /* Broadwell E5-2600 v4 processors need to work around erratum BDF14. */
+    if ( boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 79 )
+        bdw_erratum_bdf14_fixup_needed = true;
+}
+
 static int is_last_branch_msr(u32 ecx)
 {
     const struct lbr_info *lbr = last_branch_msr_get();
@@ -3086,8 +3099,11 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
                     if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )
                     {
                         vmx_disable_intercept_for_msr(v, lbr->base + i,
                                                       MSR_TYPE_R | MSR_TYPE_W);
-                        v->arch.hvm_vmx.lbr_tsx_fixup_enabled =
-                            lbr_tsx_fixup_needed;
+                        if ( lbr_tsx_fixup_needed )
+                            v->arch.hvm_vmx.lbr_fixup_enabled |= FIXUP_LBR_TSX;
+                        if ( bdw_erratum_bdf14_fixup_needed )
+                            v->arch.hvm_vmx.lbr_fixup_enabled |=
+                                FIXUP_BDW_ERRATUM_BDF14;
                     }
         }
@@ -4168,6 +4184,44 @@ static void lbr_tsx_fixup(void)
         msr->data |= ((LBR_FROM_SIGNEXT_2MSB & msr->data) << 2);
 }
 
+static void sign_extend_msr(u32 msr, int type)
+{
+    struct vmx_msr_entry *entry;
+
+    if ( (entry = vmx_find_msr(msr, type)) != NULL )
+    {
+        if ( entry->data & VADDR_TOP_BIT )
+            entry->data |= CANONICAL_MASK;
+        else
+            entry->data &= ~CANONICAL_MASK;
+    }
+}
+
+static void bdw_erratum_bdf14_fixup(void)
+{
+    /*
+     * Occasionally, on certain Broadwell CPUs MSR_IA32_LASTINTTOIP has
+     * been observed to have the top three bits corrupted as though the
+     * MSR is using the LBR_FORMAT_EIP_FLAGS_TSX format. This is
+     * incorrect and causes a vmentry failure -- the MSR should contain
+     * an offset into the current code segment. This is assumed to be
+     * erratum BDF14. Fix up MSR_IA32_LASTINT{FROM,TO}IP by
+     * sign-extending into bits 48:63.
+     */
+    sign_extend_msr(MSR_IA32_LASTINTFROMIP, VMX_GUEST_MSR);
+    sign_extend_msr(MSR_IA32_LASTINTTOIP, VMX_GUEST_MSR);
+}
+
+static void lbr_fixup(void)
+{
+    struct vcpu *curr = current;
+
+    if ( curr->arch.hvm_vmx.lbr_fixup_enabled & FIXUP_LBR_TSX )
+        lbr_tsx_fixup();
+    if ( curr->arch.hvm_vmx.lbr_fixup_enabled & FIXUP_BDW_ERRATUM_BDF14 )
+        bdw_erratum_bdf14_fixup();
+}
+
 void vmx_vmenter_helper(const struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
@@ -4224,8 +4278,8 @@ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
     }
 
  out:
-    if ( unlikely(curr->arch.hvm_vmx.lbr_tsx_fixup_enabled) )
-        lbr_tsx_fixup();
+    if ( unlikely(curr->arch.hvm_vmx.lbr_fixup_enabled) )
+        lbr_fixup();
 
     HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 9507bd2..e3cdfdf 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -136,7 +136,7 @@ struct arch_vmx_struct {
     /* Are we emulating rather than VMENTERing? */
     uint8_t              vmx_emulate;
 
-    bool                 lbr_tsx_fixup_enabled;
+    uint8_t              lbr_fixup_enabled;
 
     /* Bitmask of segments that we can't safely use in virtual 8086 mode */
     uint16_t             vm86_segment_mask;
diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
index 1a6cae6..1fbd2c1 100644
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -28,6 +28,9 @@
 #define PADDR_MASK              ((1UL << PADDR_BITS)-1)
 #define VADDR_MASK              ((1UL << VADDR_BITS)-1)
 
+#define VADDR_TOP_BIT           (1UL << (VADDR_BITS - 1))
+#define CANONICAL_MASK          (~0UL & ~VADDR_MASK)
+
 #define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63))
 
 #ifndef __ASSEMBLY__
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.9
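Editor's note: as an illustration of the fix, the following is a minimal,
self-contained userspace sketch of the same canonicalisation that the patch's
sign_extend_msr() performs. The macros mirror the constants the patch adds to
page.h (x86-64 implements 48 virtual-address bits); the helper name
sign_extend_48() and the corrupted sample value are hypothetical, chosen only
to demonstrate a non-canonical address becoming canonical.

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror the patch's constants: x86-64 implements 48 virtual
     * address bits, so VADDR_BITS == 48. */
    #define VADDR_BITS      48
    #define VADDR_MASK      ((1ULL << VADDR_BITS) - 1)
    #define VADDR_TOP_BIT   (1ULL << (VADDR_BITS - 1))
    #define CANONICAL_MASK  (~0ULL & ~VADDR_MASK)

    /* Same operation as the patch's sign_extend_msr(): propagate bit 47
     * into bits 48:63 so the result is a canonical address. */
    static uint64_t sign_extend_48(uint64_t val)
    {
        if ( val & VADDR_TOP_BIT )
            val |= CANONICAL_MASK;   /* upper-half address: set bits 48:63 */
        else
            val &= ~CANONICAL_MASK;  /* lower-half address: clear bits 48:63 */
        return val;
    }

    /* Same test as Xen's is_canonical_address() macro. */
    static int is_canonical(uint64_t x)
    {
        return ((int64_t)x >> 47) == ((int64_t)x >> 63);
    }

    int main(void)
    {
        /* Hypothetical corrupted LASTINTTOIP value: the top bits have
         * been clobbered as though they held LBR_FORMAT_EIP_FLAGS_TSX
         * flag bits, leaving a non-canonical address. */
        uint64_t v = 0x1fffffff81234567ULL;

        printf("before: %#018llx canonical=%d\n",
               (unsigned long long)v, is_canonical(v));
        v = sign_extend_48(v);
        printf("after:  %#018llx canonical=%d\n",
               (unsigned long long)v, is_canonical(v));
        /* after: 0xffffffff81234567 canonical=1 -- safe to load on vmentry */
        return 0;
    }

Note also the small design change the patch makes to support two fixups at
once: the per-vcpu bool lbr_tsx_fixup_enabled becomes a uint8_t bitmask
(FIXUP_LBR_TSX, FIXUP_BDW_ERRATUM_BDF14), so the single test on the vmentry
fast path stays one branch while lbr_fixup() dispatches to whichever
workarounds the CPU actually needs.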