[xen stable-4.16] x86/vmx: Calculate model-specific LBRs once at start of day

commit 401e9e33a04c2a9887636ef58490c764543f0538
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Feb 7 17:04:18 2023 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Feb 7 17:04:18 2023 +0100

    x86/vmx: Calculate model-specific LBRs once at start of day

    There is no point repeating this calculation at runtime, especially as it is
    in the fallback path of the WRMSR/RDMSR handlers.

    Move the infrastructure higher in vmx.c to avoid forward declarations,
    renaming last_branch_msr_get() to get_model_specific_lbr() to highlight that
    these are model-specific only.

    No practical change.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
    master commit: e94af0d58f86c3a914b9cbbf4d9ed3d43b974771
    master date: 2023-01-12 18:42:00 +0000
---
 xen/arch/x86/hvm/vmx/vmx.c | 276 +++++++++++++++++++++++----------------------
 1 file changed, 139 insertions(+), 137 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 3f42765313..bc308d9df2 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -394,6 +394,142 @@ void vmx_pi_hooks_deassign(struct domain *d)
     domain_unpause(d);
 }
 
+static const struct lbr_info {
+    u32 base, count;
+} p4_lbr[] = {
+    { MSR_P4_LER_FROM_LIP, 1 },
+    { MSR_P4_LER_TO_LIP, 1 },
+    { MSR_P4_LASTBRANCH_TOS, 1 },
+    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
+    { MSR_P4_LASTBRANCH_0_TO_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
+    { 0, 0 }
+}, c2_lbr[] = {
+    { MSR_IA32_LASTINTFROMIP, 1 },
+    { MSR_IA32_LASTINTTOIP, 1 },
+    { MSR_C2_LASTBRANCH_TOS, 1 },
+    { MSR_C2_LASTBRANCH_0_FROM_IP, NUM_MSR_C2_LASTBRANCH_FROM_TO },
+    { MSR_C2_LASTBRANCH_0_TO_IP, NUM_MSR_C2_LASTBRANCH_FROM_TO },
+    { 0, 0 }
+}, nh_lbr[] = {
+    { MSR_IA32_LASTINTFROMIP, 1 },
+    { MSR_IA32_LASTINTTOIP, 1 },
+    { MSR_NHL_LBR_SELECT, 1 },
+    { MSR_NHL_LASTBRANCH_TOS, 1 },
+    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
+    { MSR_P4_LASTBRANCH_0_TO_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
+    { 0, 0 }
+}, sk_lbr[] = {
+    { MSR_IA32_LASTINTFROMIP, 1 },
+    { MSR_IA32_LASTINTTOIP, 1 },
+    { MSR_NHL_LBR_SELECT, 1 },
+    { MSR_NHL_LASTBRANCH_TOS, 1 },
+    { MSR_SKL_LASTBRANCH_0_FROM_IP, NUM_MSR_SKL_LASTBRANCH },
+    { MSR_SKL_LASTBRANCH_0_TO_IP, NUM_MSR_SKL_LASTBRANCH },
+    { MSR_SKL_LASTBRANCH_0_INFO, NUM_MSR_SKL_LASTBRANCH },
+    { 0, 0 }
+}, at_lbr[] = {
+    { MSR_IA32_LASTINTFROMIP, 1 },
+    { MSR_IA32_LASTINTTOIP, 1 },
+    { MSR_C2_LASTBRANCH_TOS, 1 },
+    { MSR_C2_LASTBRANCH_0_FROM_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
+    { MSR_C2_LASTBRANCH_0_TO_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
+    { 0, 0 }
+}, sm_lbr[] = {
+    { MSR_IA32_LASTINTFROMIP, 1 },
+    { MSR_IA32_LASTINTTOIP, 1 },
+    { MSR_SM_LBR_SELECT, 1 },
+    { MSR_SM_LASTBRANCH_TOS, 1 },
+    { MSR_C2_LASTBRANCH_0_FROM_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
+    { MSR_C2_LASTBRANCH_0_TO_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
+    { 0, 0 }
+}, gm_lbr[] = {
+    { MSR_IA32_LASTINTFROMIP, 1 },
+    { MSR_IA32_LASTINTTOIP, 1 },
+    { MSR_SM_LBR_SELECT, 1 },
+    { MSR_SM_LASTBRANCH_TOS, 1 },
+    { MSR_GM_LASTBRANCH_0_FROM_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO },
+    { MSR_GM_LASTBRANCH_0_TO_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO },
+    { 0, 0 }
+};
+static const struct lbr_info *__read_mostly model_specific_lbr;
+
+static const struct lbr_info *__init get_model_specific_lbr(void)
+{
+    switch ( boot_cpu_data.x86 )
+    {
+    case 6:
+        switch ( boot_cpu_data.x86_model )
+        {
+        /* Core2 Duo */
+        case 0x0f:
+        /* Enhanced Core */
+        case 0x17:
+        /* Xeon 7400 */
+        case 0x1d:
+            return c2_lbr;
+        /* Nehalem */
+        case 0x1a: case 0x1e: case 0x1f: case 0x2e:
+        /* Westmere */
+        case 0x25: case 0x2c: case 0x2f:
+        /* Sandy Bridge */
+        case 0x2a: case 0x2d:
+        /* Ivy Bridge */
+        case 0x3a: case 0x3e:
+        /* Haswell */
+        case 0x3c: case 0x3f: case 0x45: case 0x46:
+        /* Broadwell */
+        case 0x3d: case 0x47: case 0x4f: case 0x56:
+            return nh_lbr;
+        /* Skylake */
+        case 0x4e: case 0x5e:
+        /* Xeon Scalable */
+        case 0x55:
+        /* Cannon Lake */
+        case 0x66:
+        /* Goldmont Plus */
+        case 0x7a:
+        /* Ice Lake */
+        case 0x6a: case 0x6c: case 0x7d: case 0x7e:
+        /* Tiger Lake */
+        case 0x8c: case 0x8d:
+        /* Tremont */
+        case 0x86:
+        /* Kaby Lake */
+        case 0x8e: case 0x9e:
+        /* Comet Lake */
+        case 0xa5: case 0xa6:
+            return sk_lbr;
+        /* Atom */
+        case 0x1c: case 0x26: case 0x27: case 0x35: case 0x36:
+            return at_lbr;
+        /* Silvermont */
+        case 0x37: case 0x4a: case 0x4d: case 0x5a: case 0x5d:
+        /* Xeon Phi Knights Landing */
+        case 0x57:
+        /* Xeon Phi Knights Mill */
+        case 0x85:
+        /* Airmont */
+        case 0x4c:
+            return sm_lbr;
+        /* Goldmont */
+        case 0x5c: case 0x5f:
+            return gm_lbr;
+        }
+        break;
+
+    case 15:
+        switch ( boot_cpu_data.x86_model )
+        {
+        /* Pentium4/Xeon with em64t */
+        case 3: case 4: case 6:
+            return p4_lbr;
+        }
+        break;
+    }
+
+    return NULL;
+}
+
 static int vmx_domain_initialise(struct domain *d)
 {
     static const struct arch_csw csw = {
@@ -2812,6 +2948,7 @@ const struct hvm_function_table * __init start_vmx(void)
         vmx_function_table.get_guest_bndcfgs = vmx_get_guest_bndcfgs;
     }
 
+    model_specific_lbr = get_model_specific_lbr();
     lbr_tsx_fixup_check();
     ler_to_fixup_check();
 
@@ -2958,141 +3095,6 @@ static int vmx_cr_access(cr_access_qual_t qual)
     return X86EMUL_OKAY;
 }
 
-static const struct lbr_info {
-    u32 base, count;
-} p4_lbr[] = {
-    { MSR_P4_LER_FROM_LIP, 1 },
-    { MSR_P4_LER_TO_LIP, 1 },
-    { MSR_P4_LASTBRANCH_TOS, 1 },
-    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
-    { MSR_P4_LASTBRANCH_0_TO_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
-    { 0, 0 }
-}, c2_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP, 1 },
-    { MSR_IA32_LASTINTTOIP, 1 },
-    { MSR_C2_LASTBRANCH_TOS, 1 },
-    { MSR_C2_LASTBRANCH_0_FROM_IP, NUM_MSR_C2_LASTBRANCH_FROM_TO },
-    { MSR_C2_LASTBRANCH_0_TO_IP, NUM_MSR_C2_LASTBRANCH_FROM_TO },
-    { 0, 0 }
-}, nh_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP, 1 },
-    { MSR_IA32_LASTINTTOIP, 1 },
-    { MSR_NHL_LBR_SELECT, 1 },
-    { MSR_NHL_LASTBRANCH_TOS, 1 },
-    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
-    { MSR_P4_LASTBRANCH_0_TO_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
-    { 0, 0 }
-}, sk_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP, 1 },
-    { MSR_IA32_LASTINTTOIP, 1 },
-    { MSR_NHL_LBR_SELECT, 1 },
-    { MSR_NHL_LASTBRANCH_TOS, 1 },
-    { MSR_SKL_LASTBRANCH_0_FROM_IP, NUM_MSR_SKL_LASTBRANCH },
-    { MSR_SKL_LASTBRANCH_0_TO_IP, NUM_MSR_SKL_LASTBRANCH },
-    { MSR_SKL_LASTBRANCH_0_INFO, NUM_MSR_SKL_LASTBRANCH },
-    { 0, 0 }
-}, at_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP, 1 },
-    { MSR_IA32_LASTINTTOIP, 1 },
-    { MSR_C2_LASTBRANCH_TOS, 1 },
-    { MSR_C2_LASTBRANCH_0_FROM_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
-    { MSR_C2_LASTBRANCH_0_TO_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
-    { 0, 0 }
-}, sm_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP, 1 },
-    { MSR_IA32_LASTINTTOIP, 1 },
-    { MSR_SM_LBR_SELECT, 1 },
-    { MSR_SM_LASTBRANCH_TOS, 1 },
-    { MSR_C2_LASTBRANCH_0_FROM_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
-    { MSR_C2_LASTBRANCH_0_TO_IP, NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
-    { 0, 0 }
-}, gm_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP, 1 },
-    { MSR_IA32_LASTINTTOIP, 1 },
-    { MSR_SM_LBR_SELECT, 1 },
-    { MSR_SM_LASTBRANCH_TOS, 1 },
-    { MSR_GM_LASTBRANCH_0_FROM_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO },
-    { MSR_GM_LASTBRANCH_0_TO_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO },
-    { 0, 0 }
-};
-
-static const struct lbr_info *last_branch_msr_get(void)
-{
-    switch ( boot_cpu_data.x86 )
-    {
-    case 6:
-        switch ( boot_cpu_data.x86_model )
-        {
-        /* Core2 Duo */
-        case 0x0f:
-        /* Enhanced Core */
-        case 0x17:
-        /* Xeon 7400 */
-        case 0x1d:
-            return c2_lbr;
-        /* Nehalem */
-        case 0x1a: case 0x1e: case 0x1f: case 0x2e:
-        /* Westmere */
-        case 0x25: case 0x2c: case 0x2f:
-        /* Sandy Bridge */
-        case 0x2a: case 0x2d:
-        /* Ivy Bridge */
-        case 0x3a: case 0x3e:
-        /* Haswell */
-        case 0x3c: case 0x3f: case 0x45: case 0x46:
-        /* Broadwell */
-        case 0x3d: case 0x47: case 0x4f: case 0x56:
-            return nh_lbr;
-        /* Skylake */
-        case 0x4e: case 0x5e:
-        /* Xeon Scalable */
-        case 0x55:
-        /* Cannon Lake */
-        case 0x66:
-        /* Goldmont Plus */
-        case 0x7a:
-        /* Ice Lake */
-        case 0x6a: case 0x6c: case 0x7d: case 0x7e:
-        /* Tiger Lake */
-        case 0x8c: case 0x8d:
-        /* Tremont */
-        case 0x86:
-        /* Kaby Lake */
-        case 0x8e: case 0x9e:
-        /* Comet Lake */
-        case 0xa5: case 0xa6:
-            return sk_lbr;
-        /* Atom */
-        case 0x1c: case 0x26: case 0x27: case 0x35: case 0x36:
-            return at_lbr;
-        /* Silvermont */
-        case 0x37: case 0x4a: case 0x4d: case 0x5a: case 0x5d:
-        /* Xeon Phi Knights Landing */
-        case 0x57:
-        /* Xeon Phi Knights Mill */
-        case 0x85:
-        /* Airmont */
-        case 0x4c:
-            return sm_lbr;
-        /* Goldmont */
-        case 0x5c: case 0x5f:
-            return gm_lbr;
-        }
-        break;
-
-    case 15:
-        switch ( boot_cpu_data.x86_model )
-        {
-        /* Pentium4/Xeon with em64t */
-        case 3: case 4: case 6:
-            return p4_lbr;
-        }
-        break;
-    }
-
-    return NULL;
-}
-
 enum
 {
     LBR_FORMAT_32 = 0x0, /* 32-bit record format */
@@ -3199,7 +3201,7 @@ static void __init ler_to_fixup_check(void)
 
 static int is_last_branch_msr(u32 ecx)
 {
-    const struct lbr_info *lbr = last_branch_msr_get();
+    const struct lbr_info *lbr = model_specific_lbr;
 
     if ( lbr == NULL )
         return 0;
@@ -3536,7 +3538,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         if ( !(v->arch.hvm.vmx.lbr_flags & LBR_MSRS_INSERTED) &&
              (msr_content & IA32_DEBUGCTLMSR_LBR) )
         {
-            const struct lbr_info *lbr = last_branch_msr_get();
+            const struct lbr_info *lbr = model_specific_lbr;
 
             if ( unlikely(!lbr) )
             {
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.16
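
For readers who want the shape of the change without wading through the diff: the model-specific LBR table lookup that previously ran inside the RDMSR/WRMSR intercept fallback now runs exactly once from start_vmx() and is cached in a __read_mostly pointer, which the hot paths merely dereference. Below is a minimal, standalone sketch of that resolve-once-then-cache pattern. It is not Xen code: the names and MSR numbers in it (cpu_family, cpu_model, resolve_lbr_table(), cached_lbr, is_lbr_msr(), model_a, model_b) are hypothetical stand-ins for boot_cpu_data, get_model_specific_lbr(), model_specific_lbr and is_last_branch_msr().

/*
 * Standalone sketch of "resolve once at init, cache in a read-mostly
 * pointer".  Not Xen code; all names/MSR numbers are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct lbr_range {
    uint32_t base, count;           /* first MSR index and number of MSRs */
};

/* Per-model tables, terminated by a { 0, 0 } sentinel (as in the patch). */
static const struct lbr_range model_a[] = {
    { 0x1db, 1 }, { 0x680, 16 }, { 0, 0 }
};
static const struct lbr_range model_b[] = {
    { 0x1dd, 1 }, { 0x6c0, 8 }, { 0, 0 }
};

/* Stand-ins for boot_cpu_data.x86 / boot_cpu_data.x86_model. */
static unsigned int cpu_family = 6, cpu_model = 0x4e;

/* Cached result: set once at init, only read afterwards. */
static const struct lbr_range *cached_lbr;

/* Analogue of get_model_specific_lbr(): run once at start of day. */
static const struct lbr_range *resolve_lbr_table(void)
{
    if ( cpu_family != 6 )
        return NULL;                /* unknown family: no model-specific LBRs */
    return (cpu_model == 0x4e) ? model_a : model_b;
}

/* Hot-path analogue of is_last_branch_msr(): just scan the cached table. */
static int is_lbr_msr(uint32_t msr)
{
    const struct lbr_range *r = cached_lbr;

    if ( !r )
        return 0;
    for ( ; r->count; r++ )
        if ( msr >= r->base && msr < r->base + r->count )
            return 1;
    return 0;
}

int main(void)
{
    cached_lbr = resolve_lbr_table();   /* analogous to the call added to start_vmx() */
    printf("0x685 is an LBR MSR: %d\n", is_lbr_msr(0x685));
    printf("0x1d9 is an LBR MSR: %d\n", is_lbr_msr(0x1d9));
    return 0;
}

The __read_mostly annotation on the real model_specific_lbr pointer is a good fit here: the value is written once during start_vmx() and thereafter only read from the MSR intercept paths, so grouping it with other rarely-written data avoids sharing cache lines with frequently-modified state.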