[PATCH v1 3/7] xen/monitor: wrap monitor_op under CONFIG_VM_EVENT
The monitor_op feature is based on the VM event subsystem, so monitor.o shall
be wrapped under CONFIG_VM_EVENT.

The following functions are only invoked by monitor_op, so they shall all be
wrapped with CONFIG_VM_EVENT (otherwise they become unreachable and violate
MISRA rule 2.1 when VM_EVENT=n):
- hvm_enable_msr_interception
- hvm_function_table.enable_msr_interception
- hvm_has_set_descriptor_access_exiting
- hvm_function_table.set_descriptor_access_exiting
- arch_monitor_get_capabilities

monitored_msr() still needs a stub so that its callers compile when
VM_EVENT=n.

Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
v3 -> v4:
- New commit, split from previous "xen/vm_event: consolidate CONFIG_VM_EVENT"
- Add another blank line ahead of the #ifdef
- Move hvm_enable_msr_interception() up into the earlier #ifdef
- Only wrap arch_monitor_get_capabilities(), as this static inline function
  calls hvm_has_set_descriptor_access_exiting(), which is declared only when
  VM_EVENT=y
xen/arch/x86/hvm/Makefile | 2 +-
xen/arch/x86/hvm/svm/svm.c | 8 +++++++-
xen/arch/x86/hvm/vmx/vmx.c | 10 ++++++++++
xen/arch/x86/include/asm/hvm/hvm.h | 18 +++++++++++-------
xen/arch/x86/include/asm/monitor.h | 9 +++++++++
5 files changed, 38 insertions(+), 9 deletions(-)
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index 6ec2c8f2db..50e0b6e63b 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -16,7 +16,7 @@ obj-y += io.o
obj-y += ioreq.o
obj-y += irq.o
obj-y += mmio.o
-obj-y += monitor.o
+obj-$(CONFIG_VM_EVENT) += monitor.o
obj-y += mtrr.o
obj-y += nestedhvm.o
obj-y += pmtimer.o
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 9de2fd950e..06e4572d89 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -299,6 +299,7 @@ void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags)
__clear_bit(msr * 2 + 1, msr_bit);
}
+#ifdef CONFIG_VM_EVENT
static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
{
struct vcpu *v;
@@ -306,6 +307,7 @@ static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
for_each_vcpu ( d, v )
svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE);
}
+#endif /* CONFIG_VM_EVENT */
static void svm_save_dr(struct vcpu *v)
{
@@ -826,6 +828,7 @@ static void cf_check svm_set_rdtsc_exiting(struct vcpu *v, bool enable)
vmcb_set_general2_intercepts(vmcb, general2_intercepts);
}
+#ifdef CONFIG_VM_EVENT
static void cf_check svm_set_descriptor_access_exiting(
struct vcpu *v, bool enable)
{
@@ -843,6 +846,7 @@ static void cf_check svm_set_descriptor_access_exiting(
vmcb_set_general1_intercepts(vmcb, general1_intercepts);
}
+#endif /* CONFIG_VM_EVENT */
static unsigned int cf_check svm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
{
@@ -2457,9 +2461,11 @@ static struct hvm_function_table __initdata_cf_clobber svm_function_table = {
.fpu_dirty_intercept = svm_fpu_dirty_intercept,
.msr_read_intercept = svm_msr_read_intercept,
.msr_write_intercept = svm_msr_write_intercept,
+#ifdef CONFIG_VM_EVENT
.enable_msr_interception = svm_enable_msr_interception,
- .set_rdtsc_exiting = svm_set_rdtsc_exiting,
.set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
+#endif
+ .set_rdtsc_exiting = svm_set_rdtsc_exiting,
.get_insn_bytes = svm_get_insn_bytes,
.nhvm_vcpu_initialise = nsvm_vcpu_initialise,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2e9da26016..d29c9a2ff2 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1520,6 +1520,7 @@ static void cf_check vmx_set_rdtsc_exiting(struct vcpu *v, bool enable)
vmx_vmcs_exit(v);
}
+#ifdef CONFIG_VM_EVENT
static void cf_check vmx_set_descriptor_access_exiting(
struct vcpu *v, bool enable)
{
@@ -1534,6 +1535,7 @@ static void cf_check vmx_set_descriptor_access_exiting(
vmx_update_secondary_exec_control(v);
vmx_vmcs_exit(v);
}
+#endif /* CONFIG_VM_EVENT */
static void cf_check vmx_init_hypercall_page(void *p)
{
@@ -2413,6 +2415,7 @@ static void cf_check vmx_handle_eoi(uint8_t vector, int isr)
printk_once(XENLOG_WARNING "EOI for %02x but SVI=%02x\n", vector, old_svi);
}
+#ifdef CONFIG_VM_EVENT
static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t msr)
{
struct vcpu *v;
@@ -2420,6 +2423,7 @@ static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t msr)
for_each_vcpu ( d, v )
vmx_set_msr_intercept(v, msr, VMX_MSR_W);
}
+#endif /* CONFIG_VM_EVENT */
#ifdef CONFIG_ALTP2M
@@ -2871,7 +2875,9 @@ static struct hvm_function_table __initdata_cf_clobber vmx_function_table = {
.nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
.update_vlapic_mode = vmx_vlapic_msr_changed,
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
+#ifdef CONFIG_VM_EVENT
.enable_msr_interception = vmx_enable_msr_interception,
+#endif
#ifdef CONFIG_ALTP2M
.altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
.altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
@@ -3079,9 +3085,11 @@ const struct hvm_function_table * __init start_vmx(void)
vmx_function_table.caps.singlestep = cpu_has_monitor_trap_flag;
+#ifdef CONFIG_VM_EVENT
if ( cpu_has_vmx_dt_exiting )
vmx_function_table.set_descriptor_access_exiting =
vmx_set_descriptor_access_exiting;
+#endif
/*
* Do not enable EPT when (!cpu_has_vmx_pat), to prevent security hole
@@ -3152,8 +3160,10 @@ void __init vmx_fill_funcs(void)
if ( !cpu_has_xen_ibt )
return;
+#ifdef CONFIG_VM_EVENT
vmx_function_table.set_descriptor_access_exiting =
vmx_set_descriptor_access_exiting;
+#endif
vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
vmx_function_table.process_isr = vmx_process_isr;
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index f02183691e..da00ed0694 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -192,7 +192,11 @@ struct hvm_function_table {
void (*handle_cd)(struct vcpu *v, unsigned long value);
void (*set_info_guest)(struct vcpu *v);
void (*set_rdtsc_exiting)(struct vcpu *v, bool enable);
+
+#ifdef CONFIG_VM_EVENT
void (*set_descriptor_access_exiting)(struct vcpu *v, bool enable);
+ void (*enable_msr_interception)(struct domain *d, uint32_t msr);
+#endif
/* Nested HVM */
int (*nhvm_vcpu_initialise)(struct vcpu *v);
@@ -224,8 +228,6 @@ struct hvm_function_table {
paddr_t *L1_gpa, unsigned int *page_order,
uint8_t *p2m_acc, struct npfec npfec);
- void (*enable_msr_interception)(struct domain *d, uint32_t msr);
-
#ifdef CONFIG_ALTP2M
/* Alternate p2m */
void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
@@ -433,11 +435,18 @@ static inline bool using_svm(void)
#define hvm_long_mode_active(v) (!!((v)->arch.hvm.guest_efer & EFER_LMA))
+#ifdef CONFIG_VM_EVENT
static inline bool hvm_has_set_descriptor_access_exiting(void)
{
return hvm_funcs.set_descriptor_access_exiting;
}
+static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
+{
+ alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
+}
+#endif /* CONFIG_VM_EVENT */
+
static inline void hvm_domain_creation_finished(struct domain *d)
{
if ( hvm_funcs.domain_creation_finished )
@@ -679,11 +688,6 @@ static inline int nhvm_hap_walk_L1_p2m(
v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
}
-static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
-{
- alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
-}
-
static inline bool hvm_is_singlestep_supported(void)
{
return hvm_funcs.caps.singlestep;
diff --git a/xen/arch/x86/include/asm/monitor.h b/xen/arch/x86/include/asm/monitor.h
index 3c64d8258f..9249324fd0 100644
--- a/xen/arch/x86/include/asm/monitor.h
+++ b/xen/arch/x86/include/asm/monitor.h
@@ -71,6 +71,7 @@ int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop)
return rc;
}
+#ifdef CONFIG_VM_EVENT
static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
{
uint32_t capabilities = 0;
@@ -102,6 +103,7 @@ static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
return capabilities;
}
+#endif /* CONFIG_VM_EVENT */
int arch_monitor_domctl_event(struct domain *d,
struct xen_domctl_monitor_op *mop);
@@ -123,7 +125,14 @@ static inline void arch_monitor_cleanup_domain(struct domain *d) {}
#endif
+#ifdef CONFIG_VM_EVENT
bool monitored_msr(const struct domain *d, u32 msr);
+#else
+static inline bool monitored_msr(const struct domain *d, u32 msr)
+{
+ return false;
+}
+#endif
bool monitored_msr_onchangeonly(const struct domain *d, u32 msr);
#endif /* __ASM_X86_MONITOR_H__ */
--
2.34.1