|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen master] x86: Refactor conditional guard in probe_cpuid_faulting()
commit 1240932a8d3174849a144f395eb858c755a6a297
Author: Alejandro Vallejo <alejandro.vallejo@xxxxxxxxx>
AuthorDate: Tue May 16 17:18:31 2023 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue May 16 17:18:31 2023 +0200
x86: Refactor conditional guard in probe_cpuid_faulting()
Move vendor-specific checks to the vendor-specific callers. While at it
move the synth cap setters to the callers too, as it's needed for a later
patch and it's not a functional change either.
No functional change.
Signed-off-by: Alejandro Vallejo <alejandro.vallejo@xxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
xen/arch/x86/cpu/amd.c | 13 ++++++++++++-
xen/arch/x86/cpu/common.c | 13 -------------
xen/arch/x86/cpu/intel.c | 12 +++++++++++-
3 files changed, 23 insertions(+), 15 deletions(-)
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 9a1a3858ed..98fb80ee88 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -271,8 +271,19 @@ static void __init noinline amd_init_levelling(void)
{
const struct cpuidmask *m = NULL;
- if (probe_cpuid_faulting())
+ /*
+ * If there's support for CpuidUserDis or CPUID faulting then
+ * we can skip levelling because CPUID accesses are trapped anyway.
+ *
+ * CPUID faulting is an Intel feature analogous to CpuidUserDis, so
+ * that can only be present when Xen is itself virtualized (because
+ * it can be emulated)
+ */
+ if (cpu_has_hypervisor && probe_cpuid_faulting()) {
+ expected_levelling_cap |= LCAP_faulting;
+ levelling_caps |= LCAP_faulting;
return;
+ }
probe_masking_msrs();
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index edc4db1335..52646f7dfb 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -131,17 +131,6 @@ bool __init probe_cpuid_faulting(void)
uint64_t val;
int rc;
- /*
- * Don't bother looking for CPUID faulting if we aren't virtualised on
- * AMD or Hygon hardware - it won't be present. Likewise for Fam0F
- * Intel hardware.
- */
- if (((boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ||
- ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
- boot_cpu_data.x86 == 0xf)) &&
- !cpu_has_hypervisor)
- return false;
-
if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
raw_cpu_policy.platform_info.cpuid_faulting =
val & MSR_PLATFORM_INFO_CPUID_FAULTING;
@@ -155,8 +144,6 @@ bool __init probe_cpuid_faulting(void)
return false;
}
- expected_levelling_cap |= LCAP_faulting;
- levelling_caps |= LCAP_faulting;
setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
return true;
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 71fc1a1e18..168cd58f36 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -226,8 +226,18 @@ static void cf_check intel_ctxt_switch_masking(const struct vcpu *next)
*/
static void __init noinline intel_init_levelling(void)
{
- if (probe_cpuid_faulting())
+ /*
+ * Intel Fam0f is old enough that probing for CPUID faulting support
+ * introduces spurious #GP(0) when the appropriate MSRs are read,
+ * so skip it altogether. In the case where Xen is virtualized these
+ * MSRs may be emulated though, so we allow it in that case.
+ */
+ if ((boot_cpu_data.x86 != 0xf || cpu_has_hypervisor) &&
+ probe_cpuid_faulting()) {
+ expected_levelling_cap |= LCAP_faulting;
+ levelling_caps |= LCAP_faulting;
return;
+ }
probe_masking_msrs();
--
generated by git-patchbot for /home/xen/git/xen.git#master
|
[image] |
Lists.xenproject.org is hosted with RackSpace, monitoring our |