[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[XEN][PATCH 1/2] x86: hvm: vmx: fix runtime vmx presence check for !CONFIG_INTEL_VMX case


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
  • Date: Tue, 16 Sep 2025 10:32:55 +0000
  • Accept-language: en-US
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=epam.com; dmarc=pass action=none header.from=epam.com; dkim=pass header.d=epam.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=7ETb3dpCMMoT7n0RU1nZ6MxKaPbIvoDnb5WEDgR6cbo=; b=PtqnO8z61U47sjy3eumE5FU726KroxX2+1Ezh2YVtAOmRinT4TVLeD1Kc/xZEnje4IwcLJ6AN2wdiXMjasVUbS2+q2Bo5P4qvxGsmmIUmLObguMvLZxIlhOwinr+KuT8Br7FxcI2DLFstLpLLRauO+cFFzMPajzEzVM6uhbbwQpolJPex/SunaVq6eaQmznPRaqb3+bjIWxDfOsW6MLl6yLe7sNdxmafnRUJme9jJ+xMmfntv5+iK6d1dki+or6Uc0+psRNhioDzqWXrYDZAmYs+W1SKQk+PEDDNlNMgkccOWSlPhV75ZID8OlPv166MxLN0fgH6r5pPaZhkwU0Olw==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=tlJCyIVh+FoaFWUCJGxYoTBfgx63HJR0SFy53VQ5kjFvrGYoY+m1KoirDpmFLySLYwDF0ukKnHyPvkWfCHUBFvswytmyN3tSd6NS2ANhYuPJaEecbikDIGu2sj8Wf7LajyMBOHH7xw22KmxQSqT3er3dxjZuN8dlSI7Vux422CejP4HYaVJzogm4SWPaBE75TExCOyH62uSrEH5e3If1gG6MUl8mBYhY12wVFglWXjmydM9wZ0A+lLmF+4o01ryFo33Mej2SZJUtlwx7e9hmhUEaEuDorgPte8A554xVT8AcAUr3HO/Y/ENIbCqKBZc8PNnXbCijNhcDqnVC73P09A==
  • Authentication-results: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=epam.com;
  • Cc: Grygorii Strashko <grygorii_strashko@xxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>, Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
  • Delivery-date: Tue, 16 Sep 2025 10:33:07 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-index: AQHcJvVD2WER0/U75UWsICVLjMYHFA==
  • Thread-topic: [XEN][PATCH 1/2] x86: hvm: vmx: fix runtime vmx presence check for !CONFIG_INTEL_VMX case

From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>

Since commit b99227347230 ("x86: Fix AMD_SVM and INTEL_VMX dependency") the
HVM Intel VT-x support can be gracefully disabled, but it still keeps VMX
code partially built-in, because HVM code uses a mix of:

 - "cpu_has_vmx" macro, which doesn't account for CONFIG_INTEL_VMX cfg
 - "using_vmx()" function, which accounts for CONFIG_INTEL_VMX cfg

for runtime VMX availability checking. As a result, compiler DCE can't
remove all unreachable VMX code.

Fix it by sticking to "cpu_has_vmx" macro usage only, which is updated to
account for the CONFIG_INTEL_VMX cfg.

Signed-off-by: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
---
Hi

It would be good to have this in 4.21, so the vmx/svm disabling
options will be in a complete state within the 4.21 release.

bloat-o-meter:
add/remove: 0/0 grow/shrink: 0/7 up/down: 0/-779 (-779)
Function                                     old     new   delta
guest_wrmsr_viridian                        1062    1043     -19
hvm_monitor_descriptor_access                168     133     -35
init_guest_cpu_policies                     1200    1164     -36
nestedhvm_setup                              274     233     -41
p2m_mem_access_sanity_check                   71      27     -44
hvm_set_param                               1602    1473    -129
dom0_construct_pvh                          4438    3963    -475
Total: Before=3422547, After=3421768, chg -0.02%

 xen/arch/x86/hvm/hvm.c                | 2 +-
 xen/arch/x86/hvm/nestedhvm.c          | 2 +-
 xen/arch/x86/include/asm/cpufeature.h | 3 ++-
 xen/arch/x86/include/asm/hvm/hvm.h    | 5 -----
 xen/arch/x86/mm/p2m-basic.c           | 4 ++--
 xen/arch/x86/traps.c                  | 4 ++--
 6 files changed, 8 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 23bd7f078a1d..57d09e02ed0f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -158,7 +158,7 @@ static int __init cf_check hvm_enable(void)
 {
     const struct hvm_function_table *fns = NULL;
 
-    if ( using_vmx() )
+    if ( cpu_has_vmx )
         fns = start_vmx();
     else if ( using_svm() )
         fns = start_svm();
diff --git a/xen/arch/x86/hvm/nestedhvm.c b/xen/arch/x86/hvm/nestedhvm.c
index bddd77d8109b..c6329ba2e51a 100644
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -155,7 +155,7 @@ static int __init cf_check nestedhvm_setup(void)
      * done, so that if (for example) HAP is disabled, nested virt is
      * disabled as well.
      */
-    if ( using_vmx() )
+    if ( cpu_has_vmx )
         start_nested_vmx(&hvm_funcs);
     else if ( using_svm() )
         start_nested_svm(&hvm_funcs);
diff --git a/xen/arch/x86/include/asm/cpufeature.h 
b/xen/arch/x86/include/asm/cpufeature.h
index b6cf0c8dfc7c..f42e95586966 100644
--- a/xen/arch/x86/include/asm/cpufeature.h
+++ b/xen/arch/x86/include/asm/cpufeature.h
@@ -136,7 +136,8 @@ static inline bool boot_cpu_has(unsigned int feat)
 #define cpu_has_sse3            boot_cpu_has(X86_FEATURE_SSE3)
 #define cpu_has_pclmulqdq       boot_cpu_has(X86_FEATURE_PCLMULQDQ)
 #define cpu_has_monitor         boot_cpu_has(X86_FEATURE_MONITOR)
-#define cpu_has_vmx             boot_cpu_has(X86_FEATURE_VMX)
+#define cpu_has_vmx             (IS_ENABLED(CONFIG_INTEL_VMX) && \
+                                 boot_cpu_has(X86_FEATURE_VMX))
 #define cpu_has_eist            boot_cpu_has(X86_FEATURE_EIST)
 #define cpu_has_ssse3           boot_cpu_has(X86_FEATURE_SSSE3)
 #define cpu_has_fma             boot_cpu_has(X86_FEATURE_FMA)
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h 
b/xen/arch/x86/include/asm/hvm/hvm.h
index f02183691ea6..0fa9e3c21598 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -383,11 +383,6 @@ int hvm_copy_context_and_params(struct domain *dst, struct 
domain *src);
 
 int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value);
 
-static inline bool using_vmx(void)
-{
-    return IS_ENABLED(CONFIG_INTEL_VMX) && cpu_has_vmx;
-}
-
 static inline bool using_svm(void)
 {
     return IS_ENABLED(CONFIG_AMD_SVM) && cpu_has_svm;
diff --git a/xen/arch/x86/mm/p2m-basic.c b/xen/arch/x86/mm/p2m-basic.c
index e126fda26760..08007a687c32 100644
--- a/xen/arch/x86/mm/p2m-basic.c
+++ b/xen/arch/x86/mm/p2m-basic.c
@@ -40,7 +40,7 @@ static int p2m_initialise(struct domain *d, struct p2m_domain 
*p2m)
     p2m_pod_init(p2m);
     p2m_nestedp2m_init(p2m);
 
-    if ( hap_enabled(d) && using_vmx() )
+    if ( hap_enabled(d) && cpu_has_vmx )
         ret = ept_p2m_init(p2m);
     else
         p2m_pt_init(p2m);
@@ -72,7 +72,7 @@ struct p2m_domain *p2m_init_one(struct domain *d)
 void p2m_free_one(struct p2m_domain *p2m)
 {
     p2m_free_logdirty(p2m);
-    if ( hap_enabled(p2m->domain) && using_vmx() )
+    if ( hap_enabled(p2m->domain) && cpu_has_vmx )
         ept_p2m_uninit(p2m);
     free_cpumask_var(p2m->dirty_cpumask);
     xfree(p2m);
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 0c5393cb2166..e5141f819330 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -918,7 +918,7 @@ void vcpu_show_execution_state(struct vcpu *v)
      * region. Despite this being a layering violation, engage the VMCS right
      * here. This then also avoids doing so several times in close succession.
      */
-    if ( using_vmx() && is_hvm_vcpu(v) )
+    if ( cpu_has_vmx && is_hvm_vcpu(v) )
     {
         ASSERT(!in_irq());
         vmx_vmcs_enter(v);
@@ -947,7 +947,7 @@ void vcpu_show_execution_state(struct vcpu *v)
         console_unlock_recursive_irqrestore(flags);
     }
 
-    if ( using_vmx() && is_hvm_vcpu(v) )
+    if ( cpu_has_vmx && is_hvm_vcpu(v) )
         vmx_vmcs_exit(v);
 
     vcpu_unpause(v);
-- 
2.34.1



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.