[RESEND PATCH v2 3/3] x86/hvm: vmx: refactor cache disable mode data
From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
The Cache Disable mode data is used only by VMX code, so move it from the
common HVM structures into the VMX-specific ones:
- move the "uc_lock" and "is_in_uc_mode" fields from struct hvm_domain to
  struct vmx_domain;
- move the "cache_mode" field from struct hvm_vcpu to struct vmx_vcpu.

Since the "in_uc_mode" field is used directly in mm/shadow/multi.c
_sh_propagate(), introduce the hvm_is_in_uc_mode() macro to avoid direct
access to this field and to account for the INTEL_VMX configuration.
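
As a sketch of the resulting pattern (shown in full in the diff below;
with CONFIG_INTEL_VMX=n, using_vmx() is expected to fold to constant
false, mirroring using_svm(), so the VMX field is never evaluated):

    #define hvm_is_in_uc_mode(d) \
        (using_vmx() && (d)->arch.hvm.vmx.in_uc_mode)

    /* mm/shadow/multi.c:_sh_propagate(): force UC mappings while the
     * domain is in no-fill cache mode. */
    else if ( hvm_is_in_uc_mode(d) )
        sflags |= pat_type_2_pte_flags(X86_MT_UC);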
While here:
- rename "is_in_uc_mode" to "in_uc_mode"
- s/NORMAL_CACHE_MODE/CACHE_MODE_NORMAL
- s/NO_FILL_CACHE_MODE/CACHE_MODE_NO_FILL
Suggested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
---
changes in v2:
- s/is_in_uc_mode/in_uc_mode
- s/*_CACHE_MODE/CACHE_MODE_* and move them after "cache_mode"
- reorder uc_lock,in_uc_mode in struct vmx_domain
- move spin_lock_init()
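
For reference, the uc_lock protocol is unchanged by the move; a condensed
sketch of the shadow-paging entry path (see hvm_shadow_handle_cd() in the
diff below; the steps elided between pause and unpause in that hunk are
omitted here too):

    /* Guest sets CR0.CD: enter no-fill cache mode. */
    spin_lock(&d->arch.hvm.vmx.uc_lock);
    v->arch.hvm.vmx.cache_mode = CACHE_MODE_NO_FILL;
    if ( !d->arch.hvm.vmx.in_uc_mode )
    {
        domain_pause_nosync(d);
        /* ... hvm_set_uc_mode(d, true): set in_uc_mode and blow the
         * shadow tables so guest mappings are recreated as UC ... */
        domain_unpause(d);
    }
    spin_unlock(&d->arch.hvm.vmx.uc_lock);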
xen/arch/x86/hvm/hvm.c | 1 -
xen/arch/x86/hvm/vmx/vmx.c | 30 +++++++++++++------------
xen/arch/x86/include/asm/hvm/domain.h | 6 -----
xen/arch/x86/include/asm/hvm/hvm.h | 3 +++
xen/arch/x86/include/asm/hvm/vcpu.h | 3 ---
xen/arch/x86/include/asm/hvm/vmx/vmcs.h | 13 +++++++++++
xen/arch/x86/include/asm/mtrr.h | 3 ---
xen/arch/x86/mm/shadow/multi.c | 2 +-
8 files changed, 33 insertions(+), 28 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 9caca93e5f56..c09fb2ba6873 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -608,7 +608,6 @@ int hvm_domain_initialise(struct domain *d,
}
spin_lock_init(&d->arch.hvm.irq_lock);
- spin_lock_init(&d->arch.hvm.uc_lock);
spin_lock_init(&d->arch.hvm.write_map.lock);
rwlock_init(&d->arch.hvm.mmcfg_lock);
INIT_LIST_HEAD(&d->arch.hvm.write_map.list);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 67861bc8bccf..0d9a4a17c433 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -590,6 +590,8 @@ static int cf_check vmx_domain_initialise(struct domain *d)
*/
d->arch.hvm.vmx.exec_sp = is_hardware_domain(d) || opt_ept_exec_sp;
+ spin_lock_init(&d->arch.hvm.vmx.uc_lock);
+
if ( (rc = vmx_alloc_vlapic_mapping(d)) != 0 )
return rc;
@@ -1431,7 +1433,7 @@ static void cf_check vmx_set_segment_register(
static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
{
if ( !paging_mode_hap(v->domain) ||
- unlikely(v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
+ unlikely(v->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) )
return 0;
vmx_vmcs_enter(v);
@@ -1443,7 +1445,7 @@ static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
static int cf_check vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
{
if ( !paging_mode_hap(v->domain) ||
- unlikely(v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
+ unlikely(v->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) )
return 0;
vmx_vmcs_enter(v);
@@ -1462,7 +1464,7 @@ static bool domain_exit_uc_mode(struct vcpu *v)
{
if ( (vs == v) || !vs->is_initialised )
continue;
- if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
+ if ( (vs->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) ||
mtrr_pat_not_equal(vs, v) )
return false;
}
@@ -1472,7 +1474,7 @@ static bool domain_exit_uc_mode(struct vcpu *v)
static void hvm_set_uc_mode(struct domain *d, bool is_in_uc_mode)
{
- d->arch.hvm.is_in_uc_mode = is_in_uc_mode;
+ d->arch.hvm.vmx.in_uc_mode = is_in_uc_mode;
shadow_blow_tables_per_domain(d);
}
@@ -1483,10 +1485,10 @@ static void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
if ( value & X86_CR0_CD )
{
/* Entering no fill cache mode. */
- spin_lock(&d->arch.hvm.uc_lock);
- v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
+ spin_lock(&d->arch.hvm.vmx.uc_lock);
+ v->arch.hvm.vmx.cache_mode = CACHE_MODE_NO_FILL;
- if ( !d->arch.hvm.is_in_uc_mode )
+ if ( !d->arch.hvm.vmx.in_uc_mode )
{
domain_pause_nosync(d);
@@ -1496,19 +1498,19 @@ static void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
domain_unpause(d);
}
- spin_unlock(&d->arch.hvm.uc_lock);
+ spin_unlock(&d->arch.hvm.vmx.uc_lock);
}
else if ( !(value & X86_CR0_CD) &&
- (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
+ (v->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) )
{
/* Exit from no fill cache mode. */
- spin_lock(&d->arch.hvm.uc_lock);
- v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
+ spin_lock(&d->arch.hvm.vmx.uc_lock);
+ v->arch.hvm.vmx.cache_mode = CACHE_MODE_NORMAL;
if ( domain_exit_uc_mode(v) )
hvm_set_uc_mode(d, false);
- spin_unlock(&d->arch.hvm.uc_lock);
+ spin_unlock(&d->arch.hvm.vmx.uc_lock);
}
}
@@ -1549,11 +1551,11 @@ static void cf_check vmx_handle_cd(struct vcpu *v, unsigned long value)
wbinvd(); /* flush possibly polluted cache */
hvm_asid_flush_vcpu(v); /* invalidate memory type cached in TLB */
- v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
+ v->arch.hvm.vmx.cache_mode = CACHE_MODE_NO_FILL;
}
else
{
- v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
+ v->arch.hvm.vmx.cache_mode = CACHE_MODE_NORMAL;
vmx_set_guest_pat(v, *pat);
if ( !is_iommu_enabled(v->domain) || iommu_snoop )
vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
diff --git a/xen/arch/x86/include/asm/hvm/domain.h b/xen/arch/x86/include/asm/hvm/domain.h
index 95d9336a28f0..83be2bd1c29c 100644
--- a/xen/arch/x86/include/asm/hvm/domain.h
+++ b/xen/arch/x86/include/asm/hvm/domain.h
@@ -97,12 +97,6 @@ struct hvm_domain {
/* VRAM dirty support. Protect with the domain paging lock. */
struct sh_dirty_vram *dirty_vram;
- /* If one of vcpus of this domain is in no_fill_mode or
- * mtrr/pat between vcpus is not the same, set is_in_uc_mode
- */
- spinlock_t uc_lock;
- bool is_in_uc_mode;
-
bool is_s3_suspended;
/* Compatibility setting for a bug in x2APIC LDR */
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index 20b373cea6fb..728b9624522f 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -394,6 +394,9 @@ static inline bool using_svm(void)
return IS_ENABLED(CONFIG_AMD_SVM) && cpu_has_svm;
}
+#define hvm_is_in_uc_mode(d) \
+ (using_vmx() && (d)->arch.hvm.vmx.in_uc_mode)
+
#ifdef CONFIG_HVM
#define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)
diff --git a/xen/arch/x86/include/asm/hvm/vcpu.h b/xen/arch/x86/include/asm/hvm/vcpu.h
index 9ed9eaff3bc5..eae9ac53767b 100644
--- a/xen/arch/x86/include/asm/hvm/vcpu.h
+++ b/xen/arch/x86/include/asm/hvm/vcpu.h
@@ -168,9 +168,6 @@ struct hvm_vcpu {
u8 evtchn_upcall_vector;
- /* Which cache mode is this VCPU in (CR0:CD/NW)? */
- u8 cache_mode;
-
struct hvm_vcpu_io hvm_io;
/* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
index 53aa9e3556d3..d28a2682e9df 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
@@ -46,6 +46,7 @@ struct ept_data {
#define _VMX_DOMAIN_PML_ENABLED 0
#define VMX_DOMAIN_PML_ENABLED (1ul << _VMX_DOMAIN_PML_ENABLED)
+
struct vmx_domain {
mfn_t apic_access_mfn;
/* VMX_DOMAIN_* */
@@ -56,6 +57,13 @@ struct vmx_domain {
* around CVE-2018-12207 as appropriate.
*/
bool exec_sp;
+ /*
+ * If any vCPU of this domain is in no-fill cache mode, or MTRR/PAT
+ * differs between vCPUs, set in_uc_mode.
+ * Protected by uc_lock.
+ */
+ bool in_uc_mode;
+ spinlock_t uc_lock;
};
/*
@@ -158,6 +166,11 @@ struct vmx_vcpu {
uint8_t lbr_flags;
+ /* Which cache mode is this VCPU in (CR0:CD/NW)? */
+ uint8_t cache_mode;
+#define CACHE_MODE_NORMAL 0
+#define CACHE_MODE_NO_FILL 2
+
/* Bitmask of segments that we can't safely use in virtual 8086 mode */
uint16_t vm86_segment_mask;
/* Shadow CS, SS, DS, ES, FS, GS, TR while in virtual 8086 mode */
diff --git a/xen/arch/x86/include/asm/mtrr.h b/xen/arch/x86/include/asm/mtrr.h
index 25d442659df2..3a5b4f5b6eec 100644
--- a/xen/arch/x86/include/asm/mtrr.h
+++ b/xen/arch/x86/include/asm/mtrr.h
@@ -7,9 +7,6 @@
#define MEMORY_NUM_TYPES MTRR_NUM_TYPES
#define NO_HARDCODE_MEM_TYPE MTRR_NUM_TYPES
-#define NORMAL_CACHE_MODE 0
-#define NO_FILL_CACHE_MODE 2
-
#define INVALID_MEM_TYPE X86_NUM_MT
/* In the Intel processor's MTRR interface, the MTRR type is always held in
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 7be9c180ec43..03be61e225c0 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -559,7 +559,7 @@ _sh_propagate(struct vcpu *v,
if ( !mmio_mfn &&
(type = hvm_get_mem_pinned_cacheattr(d, target_gfn, 0)) >= 0 )
sflags |= pat_type_2_pte_flags(type);
- else if ( d->arch.hvm.is_in_uc_mode )
+ else if ( hvm_is_in_uc_mode(d) )
sflags |= pat_type_2_pte_flags(X86_MT_UC);
else
if ( iomem_access_permitted(d, mfn_x(target_mfn), mfn_x(target_mfn)) )
--
2.34.1