
[XEN][PATCH v2 3/3] x86/hvm: vmx: refactor cache disable mode data


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
  • Date: Fri, 7 Nov 2025 15:05:01 +0000
  • Cc: Grygorii Strashko <grygorii_strashko@xxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Tim Deegan <tim@xxxxxxx>, Jason Andryuk <jason.andryuk@xxxxxxx>, Teddy Astie <teddy.astie@xxxxxxxxxx>
  • Delivery-date: Fri, 07 Nov 2025 15:05:08 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>

The Cache Disable mode data is used only by VMX code, so move it from
common HVM structures into VMX-specific structures:
- move "uc_lock", "is_in_uc_mode" fields from struct hvm_domain to struct
vmx_domain;
- move "cache_mode" field from struct hvm_vcpu to struct vmx_vcpu.

Since the "in_uc_mode" field is used directly by _sh_propagate() in
mm/shadow/multi.c, introduce the hvm_is_in_uc_mode() macro to avoid direct
access to this field and to account for the INTEL_VMX configuration.

While here:
- rename "is_in_uc_mode" to "in_uc_mode"
- s/NORMAL_CACHE_MODE/CACHE_MODE_NORMAL
- s/NO_FILL_CACHE_MODE/CACHE_MODE_NO_FILL

Suggested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
---
changes in v2:
- s/is_in_uc_mode/in_uc_mode
- s/*_CACHE_MODE/CACHE_MODE_* and move them after "cache_mode"
- reorder uc_lock,in_uc_mode in struct vmx_domain
- move spin_lock_init()
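
For context (not part of the change itself), a minimal standalone sketch of
the short-circuit pattern hvm_is_in_uc_mode() relies on; the struct layout
and the CONFIG_VMX toggle below are simplified stand-ins, not Xen's real
definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define CONFIG_VMX 1                  /* flip to 0 to mimic !INTEL_VMX */

    struct vmx_domain { bool in_uc_mode; };
    struct domain     { struct vmx_domain vmx; };

    /*
     * Constant-false when VMX support is compiled out, so the field access
     * on the right-hand side is short-circuited away and the whole
     * expression folds to false.
     */
    #define using_vmx()          (CONFIG_VMX)
    #define hvm_is_in_uc_mode(d) (using_vmx() && (d)->vmx.in_uc_mode)

    int main(void)
    {
        struct domain d = { .vmx = { .in_uc_mode = true } };

        printf("in UC mode: %d\n", hvm_is_in_uc_mode(&d));
        return 0;
    }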

 xen/arch/x86/hvm/hvm.c                  |  1 -
 xen/arch/x86/hvm/vmx/vmx.c              | 28 +++++++++++++------------
 xen/arch/x86/include/asm/hvm/domain.h   |  6 ------
 xen/arch/x86/include/asm/hvm/hvm.h      |  3 +++
 xen/arch/x86/include/asm/hvm/vcpu.h     |  3 ---
 xen/arch/x86/include/asm/hvm/vmx/vmcs.h | 13 ++++++++++++
 xen/arch/x86/include/asm/mtrr.h         |  3 ---
 xen/arch/x86/mm/shadow/multi.c          |  2 +-
 8 files changed, 32 insertions(+), 27 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 9caca93e5f56..c09fb2ba6873 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -608,7 +608,6 @@ int hvm_domain_initialise(struct domain *d,
     }
 
     spin_lock_init(&d->arch.hvm.irq_lock);
-    spin_lock_init(&d->arch.hvm.uc_lock);
     spin_lock_init(&d->arch.hvm.write_map.lock);
     rwlock_init(&d->arch.hvm.mmcfg_lock);
     INIT_LIST_HEAD(&d->arch.hvm.write_map.list);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 67861bc8bccf..12748298c10c 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -590,6 +590,8 @@ static int cf_check vmx_domain_initialise(struct domain *d)
      */
     d->arch.hvm.vmx.exec_sp = is_hardware_domain(d) || opt_ept_exec_sp;
 
+    spin_lock_init(&d->arch.hvm.vmx.uc_lock);
+
     if ( (rc = vmx_alloc_vlapic_mapping(d)) != 0 )
         return rc;
 
@@ -1431,7 +1433,7 @@ static void cf_check vmx_set_segment_register(
 static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
 {
     if ( !paging_mode_hap(v->domain) ||
-         unlikely(v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
+         unlikely(v->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) )
         return 0;
 
     vmx_vmcs_enter(v);
@@ -1443,7 +1445,7 @@ static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
 static int cf_check vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
 {
     if ( !paging_mode_hap(v->domain) ||
-         unlikely(v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
+         unlikely(v->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) )
         return 0;
 
     vmx_vmcs_enter(v);
@@ -1462,7 +1464,7 @@ static bool domain_exit_uc_mode(struct vcpu *v)
     {
         if ( (vs == v) || !vs->is_initialised )
             continue;
-        if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
+        if ( (vs->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) ||
              mtrr_pat_not_equal(vs, v) )
             return false;
     }
@@ -1472,7 +1474,7 @@ static bool domain_exit_uc_mode(struct vcpu *v)
 
 static void hvm_set_uc_mode(struct domain *d, bool is_in_uc_mode)
 {
-    d->arch.hvm.is_in_uc_mode = is_in_uc_mode;
+    d->arch.hvm.vmx.in_uc_mode = is_in_uc_mode;
     shadow_blow_tables_per_domain(d);
 }
 
@@ -1483,10 +1485,10 @@ static void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
     if ( value & X86_CR0_CD )
     {
         /* Entering no fill cache mode. */
-        spin_lock(&d->arch.hvm.uc_lock);
-        v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
+        spin_lock(&d->arch.hvm.vmx.uc_lock);
+        v->arch.hvm.vmx.cache_mode = CACHE_MODE_NO_FILL;
 
-        if ( !d->arch.hvm.is_in_uc_mode )
+        if ( !d->arch.hvm.vmx.in_uc_mode )
         {
             domain_pause_nosync(d);
 
@@ -1499,16 +1501,16 @@ static void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
-        spin_unlock(&d->arch.hvm.uc_lock);
+        spin_unlock(&d->arch.hvm.vmx.uc_lock);
     }
     else if ( !(value & X86_CR0_CD) &&
-              (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
+              (v->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) )
     {
         /* Exit from no fill cache mode. */
-        spin_lock(&d->arch.hvm.uc_lock);
-        v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
+        spin_lock(&d->arch.hvm.vmx.uc_lock);
+        v->arch.hvm.vmx.cache_mode = CACHE_MODE_NORMAL;
 
         if ( domain_exit_uc_mode(v) )
             hvm_set_uc_mode(d, false);
 
-        spin_unlock(&d->arch.hvm.uc_lock);
+        spin_unlock(&d->arch.hvm.vmx.uc_lock);
     }
 }
 
@@ -1549,11 +1551,11 @@ static void cf_check vmx_handle_cd(struct vcpu *v, unsigned long value)
 
             wbinvd();               /* flush possibly polluted cache */
             hvm_asid_flush_vcpu(v); /* invalidate memory type cached in TLB */
-            v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
+            v->arch.hvm.vmx.cache_mode = CACHE_MODE_NO_FILL;
         }
         else
         {
-            v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
+            v->arch.hvm.vmx.cache_mode = CACHE_MODE_NORMAL;
             vmx_set_guest_pat(v, *pat);
             if ( !is_iommu_enabled(v->domain) || iommu_snoop )
                 vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
diff --git a/xen/arch/x86/include/asm/hvm/domain.h b/xen/arch/x86/include/asm/hvm/domain.h
index 95d9336a28f0..83be2bd1c29c 100644
--- a/xen/arch/x86/include/asm/hvm/domain.h
+++ b/xen/arch/x86/include/asm/hvm/domain.h
@@ -97,12 +97,6 @@ struct hvm_domain {
     /* VRAM dirty support.  Protect with the domain paging lock. */
     struct sh_dirty_vram *dirty_vram;
 
-    /* If one of vcpus of this domain is in no_fill_mode or
-     * mtrr/pat between vcpus is not the same, set is_in_uc_mode
-     */
-    spinlock_t             uc_lock;
-    bool                   is_in_uc_mode;
-
     bool                   is_s3_suspended;
 
     /* Compatibility setting for a bug in x2APIC LDR */
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index 20b373cea6fb..728b9624522f 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -394,6 +394,9 @@ static inline bool using_svm(void)
     return IS_ENABLED(CONFIG_AMD_SVM) && cpu_has_svm;
 }
 
+#define hvm_is_in_uc_mode(d) \
+    (using_vmx() && (d)->arch.hvm.vmx.in_uc_mode)
+
 #ifdef CONFIG_HVM
 
 #define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)
diff --git a/xen/arch/x86/include/asm/hvm/vcpu.h b/xen/arch/x86/include/asm/hvm/vcpu.h
index 9ed9eaff3bc5..eae9ac53767b 100644
--- a/xen/arch/x86/include/asm/hvm/vcpu.h
+++ b/xen/arch/x86/include/asm/hvm/vcpu.h
@@ -168,9 +168,6 @@ struct hvm_vcpu {
 
     u8                  evtchn_upcall_vector;
 
-    /* Which cache mode is this VCPU in (CR0:CD/NW)? */
-    u8                  cache_mode;
-
     struct hvm_vcpu_io  hvm_io;
 
     /* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
index 53aa9e3556d3..d28a2682e9df 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
@@ -46,6 +46,7 @@ struct ept_data {
 
 #define _VMX_DOMAIN_PML_ENABLED    0
 #define VMX_DOMAIN_PML_ENABLED     (1ul << _VMX_DOMAIN_PML_ENABLED)
+
 struct vmx_domain {
     mfn_t apic_access_mfn;
     /* VMX_DOMAIN_* */
@@ -56,6 +57,13 @@ struct vmx_domain {
      * around CVE-2018-12207 as appropriate.
      */
     bool exec_sp;
+    /*
+     * Set in_uc_mode when one of the domain's vCPUs is in no-fill cache
+     * mode, or when MTRR/PAT settings differ between vCPUs.
+     * Protected by uc_lock.
+     */
+    bool in_uc_mode;
+    spinlock_t uc_lock;
 };
 
 /*
@@ -158,6 +166,11 @@ struct vmx_vcpu {
 
     uint8_t              lbr_flags;
 
+    /* Which cache mode is this VCPU in (CR0:CD/NW)? */
+    uint8_t              cache_mode;
+#define CACHE_MODE_NORMAL  0
+#define CACHE_MODE_NO_FILL 2
+
     /* Bitmask of segments that we can't safely use in virtual 8086 mode */
     uint16_t             vm86_segment_mask;
     /* Shadow CS, SS, DS, ES, FS, GS, TR while in virtual 8086 mode */
diff --git a/xen/arch/x86/include/asm/mtrr.h b/xen/arch/x86/include/asm/mtrr.h
index 25d442659df2..3a5b4f5b6eec 100644
--- a/xen/arch/x86/include/asm/mtrr.h
+++ b/xen/arch/x86/include/asm/mtrr.h
@@ -7,9 +7,6 @@
 #define MEMORY_NUM_TYPES     MTRR_NUM_TYPES
 #define NO_HARDCODE_MEM_TYPE MTRR_NUM_TYPES
 
-#define NORMAL_CACHE_MODE          0
-#define NO_FILL_CACHE_MODE         2
-
 #define INVALID_MEM_TYPE X86_NUM_MT
 
 /* In the Intel processor's MTRR interface, the MTRR type is always held in
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 7be9c180ec43..03be61e225c0 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -559,7 +559,7 @@ _sh_propagate(struct vcpu *v,
         if ( !mmio_mfn &&
              (type = hvm_get_mem_pinned_cacheattr(d, target_gfn, 0)) >= 0 )
             sflags |= pat_type_2_pte_flags(type);
-        else if ( d->arch.hvm.is_in_uc_mode )
+        else if ( hvm_is_in_uc_mode(d) )
             sflags |= pat_type_2_pte_flags(X86_MT_UC);
         else
             if ( iomem_access_permitted(d, mfn_x(target_mfn), mfn_x(target_mfn)) )
-- 
2.34.1



 

