[XEN][PATCH 1/2] x86: hvm: vmx: fix runtime vmx presence check for !CONFIG_INTEL_VMX case
From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
Since commit b99227347230 ("x86: Fix AMD_SVM and INTEL_VMX dependency") the
HVM Intel VT-x support can be gracefully disabled, but doing so still leaves
VMX code partially built in, because the HVM code uses a mix of:
- the "cpu_has_vmx" macro, which does not account for the CONFIG_INTEL_VMX setting
- the "using_vmx()" function, which does account for the CONFIG_INTEL_VMX setting
for runtime VMX availability checks. As a result, the compiler's dead code
elimination (DCE) cannot remove all of the unreachable VMX code.
Fix this by using the "cpu_has_vmx" macro exclusively, and update it to
account for the CONFIG_INTEL_VMX setting.
Signed-off-by: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
---
Hi,
It would be good to have this in 4.21, so that the vmx/svm disabling options
are in a complete state for the 4.21 release. A minimal sketch of the
IS_ENABLED()/DCE pattern the fix relies on follows below.
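Not part of the patch: a minimal, self-contained sketch of the pattern this
fix relies on. Folding an IS_ENABLED(CONFIG_...) check into the predicate
makes the condition compile-time false when the option is off, so the
compiler's DCE can drop the guarded calls (and, with nothing else referencing
them, the callees). CONFIG_DEMO_FEATURE, hw_reports_feature() and
do_feature_work() are made-up names, and IS_ENABLED() is simplified here; the
real macro uses token pasting so an undefined option also evaluates to 0.

/* Sketch only: hypothetical names, simplified IS_ENABLED(). */
#include <stdbool.h>
#include <stdio.h>

#define CONFIG_DEMO_FEATURE 0      /* stand-in for a Kconfig option */
#define IS_ENABLED(opt)     (opt)  /* simplified; the real macro also
                                      handles undefined options */

static bool hw_reports_feature(void)
{
    return true;                   /* pretend the CPUID bit is set */
}

/* Mirrors the fixed cpu_has_vmx: config gate first, runtime bit second. */
#define demo_feature_usable() \
    (IS_ENABLED(CONFIG_DEMO_FEATURE) && hw_reports_feature())

static void do_feature_work(void)
{
    puts("feature path");
}

int main(void)
{
    /*
     * With CONFIG_DEMO_FEATURE == 0 the condition is constant false, so
     * the compiler can eliminate the branch and, since nothing else
     * references it, do_feature_work() itself. Mixing in a second
     * predicate that lacks the config gate would keep the code alive.
     */
    if ( demo_feature_usable() )
        do_feature_work();

    return 0;
}

Building such a file at -O2 with the option set to 0 should leave no trace of
do_feature_work() in the object file, which is the effect measured by the
bloat-o-meter output below.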
bloat-o-meter:
add/remove: 0/0 grow/shrink: 0/7 up/down: 0/-779 (-779)
Function old new delta
guest_wrmsr_viridian 1062 1043 -19
hvm_monitor_descriptor_access 168 133 -35
init_guest_cpu_policies 1200 1164 -36
nestedhvm_setup 274 233 -41
p2m_mem_access_sanity_check 71 27 -44
hvm_set_param 1602 1473 -129
dom0_construct_pvh 4438 3963 -475
Total: Before=3422547, After=3421768, chg -0.02%
xen/arch/x86/hvm/hvm.c | 2 +-
xen/arch/x86/hvm/nestedhvm.c | 2 +-
xen/arch/x86/include/asm/cpufeature.h | 3 ++-
xen/arch/x86/include/asm/hvm/hvm.h | 5 -----
xen/arch/x86/mm/p2m-basic.c | 4 ++--
xen/arch/x86/traps.c | 4 ++--
6 files changed, 8 insertions(+), 12 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 23bd7f078a1d..57d09e02ed0f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -158,7 +158,7 @@ static int __init cf_check hvm_enable(void)
{
const struct hvm_function_table *fns = NULL;

- if ( using_vmx() )
+ if ( cpu_has_vmx )
fns = start_vmx();
else if ( using_svm() )
fns = start_svm();
diff --git a/xen/arch/x86/hvm/nestedhvm.c b/xen/arch/x86/hvm/nestedhvm.c
index bddd77d8109b..c6329ba2e51a 100644
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -155,7 +155,7 @@ static int __init cf_check nestedhvm_setup(void)
* done, so that if (for example) HAP is disabled, nested virt is
* disabled as well.
*/
- if ( using_vmx() )
+ if ( cpu_has_vmx )
start_nested_vmx(&hvm_funcs);
else if ( using_svm() )
start_nested_svm(&hvm_funcs);
diff --git a/xen/arch/x86/include/asm/cpufeature.h b/xen/arch/x86/include/asm/cpufeature.h
index b6cf0c8dfc7c..f42e95586966 100644
--- a/xen/arch/x86/include/asm/cpufeature.h
+++ b/xen/arch/x86/include/asm/cpufeature.h
@@ -136,7 +136,8 @@ static inline bool boot_cpu_has(unsigned int feat)
#define cpu_has_sse3 boot_cpu_has(X86_FEATURE_SSE3)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_monitor boot_cpu_has(X86_FEATURE_MONITOR)
-#define cpu_has_vmx boot_cpu_has(X86_FEATURE_VMX)
+#define cpu_has_vmx (IS_ENABLED(CONFIG_INTEL_VMX) && \
+ boot_cpu_has(X86_FEATURE_VMX))
#define cpu_has_eist boot_cpu_has(X86_FEATURE_EIST)
#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
#define cpu_has_fma boot_cpu_has(X86_FEATURE_FMA)
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index f02183691ea6..0fa9e3c21598 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -383,11 +383,6 @@ int hvm_copy_context_and_params(struct domain *dst, struct domain *src);

int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value);

-static inline bool using_vmx(void)
-{
- return IS_ENABLED(CONFIG_INTEL_VMX) && cpu_has_vmx;
-}
-
static inline bool using_svm(void)
{
return IS_ENABLED(CONFIG_AMD_SVM) && cpu_has_svm;
diff --git a/xen/arch/x86/mm/p2m-basic.c b/xen/arch/x86/mm/p2m-basic.c
index e126fda26760..08007a687c32 100644
--- a/xen/arch/x86/mm/p2m-basic.c
+++ b/xen/arch/x86/mm/p2m-basic.c
@@ -40,7 +40,7 @@ static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
p2m_pod_init(p2m);
p2m_nestedp2m_init(p2m);

- if ( hap_enabled(d) && using_vmx() )
+ if ( hap_enabled(d) && cpu_has_vmx )
ret = ept_p2m_init(p2m);
else
p2m_pt_init(p2m);
@@ -72,7 +72,7 @@ struct p2m_domain *p2m_init_one(struct domain *d)
void p2m_free_one(struct p2m_domain *p2m)
{
p2m_free_logdirty(p2m);
- if ( hap_enabled(p2m->domain) && using_vmx() )
+ if ( hap_enabled(p2m->domain) && cpu_has_vmx )
ept_p2m_uninit(p2m);
free_cpumask_var(p2m->dirty_cpumask);
xfree(p2m);
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 0c5393cb2166..e5141f819330 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -918,7 +918,7 @@ void vcpu_show_execution_state(struct vcpu *v)
* region. Despite this being a layering violation, engage the VMCS right
* here. This then also avoids doing so several times in close succession.
*/
- if ( using_vmx() && is_hvm_vcpu(v) )
+ if ( cpu_has_vmx && is_hvm_vcpu(v) )
{
ASSERT(!in_irq());
vmx_vmcs_enter(v);
@@ -947,7 +947,7 @@ void vcpu_show_execution_state(struct vcpu *v)
console_unlock_recursive_irqrestore(flags);
}

- if ( using_vmx() && is_hvm_vcpu(v) )
+ if ( cpu_has_vmx && is_hvm_vcpu(v) )
vmx_vmcs_exit(v);

vcpu_unpause(v);
--
2.34.1