Re: [Xen-devel] [PATCH v2 3/3] x86/vmx: fix vmentry failure with TSX bits in LBR
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: Tuesday, February 21, 2017 7:25 PM
>
> On 21/02/17 09:13, Tian, Kevin wrote:
> >> @@ -2618,6 +2630,56 @@ static const struct lbr_info *last_branch_msr_get(void)
> >>      return NULL;
> >>  }
> >>
> >> +enum
> >> +{
> >> +    LBR_FORMAT_32                 = 0x0, /* 32-bit record format */
> >> +    LBR_FORMAT_LIP                = 0x1, /* 64-bit LIP record format */
> >> +    LBR_FORMAT_EIP                = 0x2, /* 64-bit EIP record format */
> >> +    LBR_FORMAT_EIP_FLAGS          = 0x3, /* 64-bit EIP, Flags */
> >> +    LBR_FORMAT_EIP_FLAGS_TSX      = 0x4, /* 64-bit EIP, Flags, TSX */
> >> +    LBR_FORMAT_EIP_FLAGS_TSX_INFO = 0x5, /* 64-bit EIP, Flags, TSX, LBR_INFO */
> >> +    LBR_FORMAT_EIP_FLAGS_CYCLES   = 0x6, /* 64-bit EIP, Flags, Cycles */
> >> +};
> >> +
> >> +#define LBR_FROM_SIGNEXT_2MSB  ((1ULL << 59) | (1ULL << 60))
> >> +
> >> +static bool __read_mostly vmx_lbr_tsx_fixup_needed;
> >> +static uint32_t __read_mostly lbr_from_start;
> >> +static uint32_t __read_mostly lbr_from_end;
> >> +static uint32_t __read_mostly lbr_lastint_from;
> >> +
> >> +static void __init vmx_lbr_tsx_fixup_check(void)
> >> +{
> >> +    bool tsx_support = cpu_has_hle || cpu_has_rtm;
> >> +    uint64_t caps;
> >> +    uint32_t lbr_format;
> >> +
> >> +    /* Fixup is needed only when TSX support is disabled ... */
> >> +    if ( tsx_support )
> >> +        return;
> >
> > !tsx_support
>
> The original code is correct.  This problem only manifests when TSX is
> unavailable.

Yes, my bad.

> >> +
> >> +    if ( !cpu_has_pdcm )
> >> +        return;
> >
> > combine two ifs into one
> >
> >> +
> >> +    rdmsrl(MSR_IA32_PERF_CAPABILITIES, caps);
> >> +    lbr_format = caps & LBR_FORMAT_MSR_IA32_PERF_CAP;
> >> +
> >> +    /* ... and the address format of LBR includes TSX bits 61:62 */
> >
> > what's "..."?
>
> From the previous comment.
>
> >> +    if ( lbr_format == LBR_FORMAT_EIP_FLAGS_TSX )
> >> +    {
> >> +        const struct lbr_info *lbr = last_branch_msr_get();
> >> +
> >> +        if ( lbr == NULL )
> >> +            return;
> >
> > move above check before rdmsrl. At least avoid one unnecessary
> > msr read when LBR is not available.
>
> Which check and which rdmsr?  In reality, if cpu_has_pdcm is set, we
> would always expect to have LBR.

I meant moving the above four lines before the PERF_CAPABILITIES read, but
on second thought it isn't necessary.

> >
> >> +
> >> +        lbr_lastint_from = lbr[LBR_LASTINT_FROM_IDX].base;
> >> +        lbr_from_start = lbr[LBR_LASTBRANCH_FROM_IDX].base;
> >> +        lbr_from_end = lbr_from_start + lbr[LBR_LASTBRANCH_FROM_IDX].count;
> >> +
> >> +        vmx_lbr_tsx_fixup_needed = true;
> >> +    }
> >> +}
> >> +
> >>  static int is_last_branch_msr(u32 ecx)
> >>  {
> >>      const struct lbr_info *lbr = last_branch_msr_get();
> >> @@ -2876,7 +2938,11 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
> >>              for ( ; (rc == 0) && lbr->count; lbr++ )
> >>                  for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
> >>                      if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )
> >> +                    {
> >>                          vmx_disable_intercept_for_msr(v, lbr->base + i, MSR_TYPE_R | MSR_TYPE_W);
> >> +                        if ( vmx_lbr_tsx_fixup_needed )
> >> +                            v->arch.hvm_vmx.lbr_tsx_fixup_enabled = true;
> >> +                    }
> >>          }
> >>
> >>      if ( (rc < 0) ||
> >> @@ -3897,6 +3963,27 @@ out:
> >>      }
> >>  }
> >>
> >> +static void vmx_lbr_tsx_fixup(void)
> >> +{
> >> +    struct vcpu *curr = current;
> >> +    unsigned int msr_count = curr->arch.hvm_vmx.msr_count;
> >> +    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
> >> +    struct vmx_msr_entry *msr;
> >> +
> >> +    if ( (msr = vmx_find_msr(lbr_from_start, VMX_GUEST_MSR)) != NULL )
> >
> > Curious whether NULL is a valid situation here. If not we need to handle it.
> > Otherwise please ignore this comment.
>
> It is safer in the case that this function gets called and there are no
> LBR MSRs in the load list.
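The hunk quoted above ends at the vmx_find_msr() lookup, before the body of
vmx_lbr_tsx_fixup(). Below is a minimal, standalone sketch of the
sign-extension arithmetic that the patch's LBR_FROM_SIGNEXT_2MSB mask is
built for, assuming the TSX record format in which the branch address
occupies bits 0:60 (sign-extended from bit 47 through bit 60) and bits 61:62
are the TSX flags, which read as zero when TSX is unavailable. The helper
name and sample value are illustrative, not taken from the patch:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LBR_FROM_SIGNEXT_2MSB  ((1ULL << 59) | (1ULL << 60))

    /* Copy bits 59:60, which already carry the sign of a canonical
     * 48-bit address, into the cleared TSX flag bits 61:62. */
    static uint64_t lbr_from_fixup(uint64_t val)
    {
        return val | ((val & LBR_FROM_SIGNEXT_2MSB) << 2);
    }

    int main(void)
    {
        /* Hypothetical LBR_FROM value for the kernel RIP
         * 0xffffffff81234567, as recorded in TSX format on a part
         * without TSX: the address is sign-extended only through
         * bit 60, and the TSX flag bits 61:62 are zero. */
        uint64_t from = 0x1fffffff81234567ULL;

        printf("recorded: %#018" PRIx64 "\n", from);
        printf("fixed up: %#018" PRIx64 "\n", lbr_from_fixup(from));
        return 0;
    }

This prints 0x1fffffff81234567 and then 0x7fffffff81234567: OR-ing in bits
59:60 shifted left by two reproduces the address sign in bits 61:62, so a
kernel-half address passes the canonicality check applied when the MSR-load
area is consumed at VM entry. For user-half addresses bits 59:60 are already
zero, so the operation is a no-op.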