
[RESEND PATCH v2 1/3] x86/hvm: move hvm_shadow_handle_cd() to vmx code


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
  • Date: Tue, 11 Nov 2025 20:10:00 +0000
  • Accept-language: en-US
  • Cc: Grygorii Strashko <grygorii_strashko@xxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Jason Andryuk <jason.andryuk@xxxxxxx>, Teddy Astie <teddy.astie@xxxxxxxxxx>
  • Delivery-date: Tue, 11 Nov 2025 20:10:07 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>

The functions:
 hvm_shadow_handle_cd()
 hvm_set_uc_mode()
 domain_exit_uc_mode()
are used only by the Intel VMX code, so move them into vmx.c.

While here:
- adjust for_each_vcpu() formatting in domain_exit_uc_mode()
- s/0/false/ and s/1/true/ for bool values
- use "struct domain *" as parameter in hvm_set_uc_mode()
- use "struct domain *d" as local var in hvm_shadow_handle_cd()

No functional changes.

Signed-off-by: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
---
changes in v2:
- use "struct domain *" as parameter in hvm_set_uc_mode()
- use "struct domain *d" as local var in hvm_shadow_handle_cd()
- move code before vmx_handle_cd()
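
For reviewers' convenience, a minimal sketch of where the moved helper is
consumed after this patch. The placement of the call inside the
!paging_mode_hap() branch is inferred from the context lines of the vmx.c
hunk below; function bodies are elided, so this is illustrative only and
not part of the patch:

    /* Illustrative sketch only -- not part of the patch; bodies elided. */
    static void cf_check vmx_handle_cd(struct vcpu *v, unsigned long value)
    {
        if ( !paging_mode_hap(v->domain) )
        {
            /*
             * Shadow paging: hvm_shadow_handle_cd() is now a static helper
             * in vmx.c, together with hvm_set_uc_mode() and
             * domain_exit_uc_mode() which it uses.
             */
            hvm_shadow_handle_cd(v, value);
        }
        else
        {
            /* HAP path, unchanged by this patch (elided). */
        }
    }

With the helpers local to vmx.c, the final hunk drops the now-unneeded
hvm_shadow_handle_cd() declaration from asm/hvm/support.h.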

 xen/arch/x86/hvm/hvm.c                 | 58 ------------------------
 xen/arch/x86/hvm/vmx/vmx.c             | 61 ++++++++++++++++++++++++++
 xen/arch/x86/include/asm/hvm/support.h |  2 -
 3 files changed, 61 insertions(+), 60 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index da3cde1ff0e6..9caca93e5f56 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2168,30 +2168,6 @@ int hvm_set_efer(uint64_t value)
     return X86EMUL_OKAY;
 }
 
-/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
-static bool domain_exit_uc_mode(struct vcpu *v)
-{
-    struct domain *d = v->domain;
-    struct vcpu *vs;
-
-    for_each_vcpu ( d, vs )
-    {
-        if ( (vs == v) || !vs->is_initialised )
-            continue;
-        if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
-             mtrr_pat_not_equal(vs, v) )
-            return 0;
-    }
-
-    return 1;
-}
-
-static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
-{
-    v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
-    shadow_blow_tables_per_domain(v->domain);
-}
-
 int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
 {
     struct vcpu *curr = current;
@@ -2273,40 +2249,6 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
     return X86EMUL_UNHANDLEABLE;
 }
 
-void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
-{
-    if ( value & X86_CR0_CD )
-    {
-        /* Entering no fill cache mode. */
-        spin_lock(&v->domain->arch.hvm.uc_lock);
-        v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
-
-        if ( !v->domain->arch.hvm.is_in_uc_mode )
-        {
-            domain_pause_nosync(v->domain);
-
-            /* Flush physical caches. */
-            flush_all(FLUSH_CACHE_EVICT);
-            hvm_set_uc_mode(v, 1);
-
-            domain_unpause(v->domain);
-        }
-        spin_unlock(&v->domain->arch.hvm.uc_lock);
-    }
-    else if ( !(value & X86_CR0_CD) &&
-              (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
-    {
-        /* Exit from no fill cache mode. */
-        spin_lock(&v->domain->arch.hvm.uc_lock);
-        v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
-
-        if ( domain_exit_uc_mode(v) )
-            hvm_set_uc_mode(v, 0);
-
-        spin_unlock(&v->domain->arch.hvm.uc_lock);
-    }
-}
-
 static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
 {
     v->arch.hvm.guest_cr[cr] = value;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 6f2cc635e582..d7efd0a73add 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -44,6 +44,7 @@
 #include <asm/processor.h>
 #include <asm/prot-key.h>
 #include <asm/regs.h>
+#include <asm/shadow.h>
 #include <asm/spec_ctrl.h>
 #include <asm/stubs.h>
 #include <asm/x86_emulate.h>
@@ -1451,6 +1452,66 @@ static int cf_check vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
     return 1;
 }
 
+/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
+static bool domain_exit_uc_mode(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    struct vcpu *vs;
+
+    for_each_vcpu(d, vs)
+    {
+        if ( (vs == v) || !vs->is_initialised )
+            continue;
+        if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
+             mtrr_pat_not_equal(vs, v) )
+            return false;
+    }
+
+    return true;
+}
+
+static void hvm_set_uc_mode(struct domain *d, bool is_in_uc_mode)
+{
+    d->arch.hvm.is_in_uc_mode = is_in_uc_mode;
+    shadow_blow_tables_per_domain(d);
+}
+
+static void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
+{
+    struct domain *d = v->domain;
+
+    if ( value & X86_CR0_CD )
+    {
+        /* Entering no fill cache mode. */
+        spin_lock(&d->arch.hvm.uc_lock);
+        v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
+
+        if ( !d->arch.hvm.is_in_uc_mode )
+        {
+            domain_pause_nosync(d);
+
+            /* Flush physical caches. */
+            flush_all(FLUSH_CACHE_EVICT);
+            hvm_set_uc_mode(d, true);
+
+            domain_unpause(d);
+        }
+        spin_unlock(&d->arch.hvm.uc_lock);
+    }
+    else if ( !(value & X86_CR0_CD) &&
+              (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
+    {
+        /* Exit from no fill cache mode. */
+        spin_lock(&d->arch.hvm.uc_lock);
+        v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
+
+        if ( domain_exit_uc_mode(v) )
+            hvm_set_uc_mode(d, false);
+
+        spin_unlock(&d->arch.hvm.uc_lock);
+    }
+}
+
 static void cf_check vmx_handle_cd(struct vcpu *v, unsigned long value)
 {
     if ( !paging_mode_hap(v->domain) )
diff --git a/xen/arch/x86/include/asm/hvm/support.h b/xen/arch/x86/include/asm/hvm/support.h
index 2a7ba36af06f..9e9fa6295567 100644
--- a/xen/arch/x86/include/asm/hvm/support.h
+++ b/xen/arch/x86/include/asm/hvm/support.h
@@ -119,8 +119,6 @@ void hvm_rdtsc_intercept(struct cpu_user_regs *regs);
 
 int __must_check hvm_handle_xsetbv(u32 index, u64 new_bv);
 
-void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
-
 /*
  * These functions all return X86EMUL return codes.  For hvm_set_*(), the
  * caller is responsible for injecting #GP[0] if X86EMUL_EXCEPTION is
-- 
2.34.1



 

