[xen staging] x86/altp2m: Wrap altp2m-specific code in #ifdef CONFIG_ALTP2M
commit 8f694f789efeff97d6e809a7ea856b032539d4a0
Author: Petr Beneš <w1benny@xxxxxxxxx>
AuthorDate: Mon Aug 25 12:49:23 2025 +0200
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Aug 25 12:49:23 2025 +0200
x86/altp2m: Wrap altp2m-specific code in #ifdef CONFIG_ALTP2M
This change consistently guards all altp2m-related functionality
behind #ifdef CONFIG_ALTP2M, so that code and data structures related
to alternate p2m views are only included when the feature is enabled.
Apart from that:
- hvmemul_vmfunc() returns X86EMUL_UNHANDLEABLE when altp2m is disabled.
- do_altp2m_op() returns -EOPNOTSUPP when altp2m is disabled.
- struct hvm_vcpu, arch_domain, and hvm_function_table define their altp2m
  fields only when the feature is enabled.
- Several declarations in p2m.h are moved under #ifdef CONFIG_ALTP2M to
  avoid polluting builds that don't need the feature.
Signed-off-by: Petr Beneš <w1benny@xxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx> # mem_access
---
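The shape of the change is the same throughout: the real body is compiled
only when CONFIG_ALTP2M is set, and callers get a benign failure otherwise.
Below is a minimal, self-contained sketch of that pattern; the names are
illustrative, not code from this patch:

#include <errno.h>  /* for EOPNOTSUPP; Xen carries its own errno definitions */

/* Hypothetical helper mirroring the do_altp2m_op() change below. */
static int example_altp2m_op(void)
{
#ifdef CONFIG_ALTP2M
    /* Feature built in: perform the real altp2m work here. */
    return 0;
#else  /* !CONFIG_ALTP2M */
    /* Feature compiled out: report the operation as unsupported. */
    return -EOPNOTSUPP;
#endif /* CONFIG_ALTP2M */
}

Hunks that guard only data (the struct fields in domain.h and vcpu.h) need
no fallback: any unguarded use of a removed field fails at compile time
rather than at run time.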
xen/arch/x86/hvm/emulate.c | 4 ++++
xen/arch/x86/hvm/hvm.c | 10 ++++++++++
xen/arch/x86/hvm/vmx/vmx.c | 8 ++++++++
xen/arch/x86/include/asm/domain.h | 2 ++
xen/arch/x86/include/asm/hvm/hvm.h | 6 ++++++
xen/arch/x86/include/asm/hvm/vcpu.h | 4 ++++
xen/arch/x86/include/asm/p2m.h | 31 ++++++++++++++++++++-----------
xen/arch/x86/mm/hap/hap.c | 6 ++++++
xen/arch/x86/mm/mem_access.c | 8 ++++++++
xen/arch/x86/mm/p2m-ept.c | 4 ++++
xen/arch/x86/mm/p2m.c | 9 +++++++++
xen/arch/x86/vm_event.c | 2 +-
12 files changed, 82 insertions(+), 12 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index f1412d8c49..2af4f30359 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2704,6 +2704,7 @@ static int cf_check hvmemul_tlb_op(
static int cf_check hvmemul_vmfunc(
struct x86_emulate_ctxt *ctxt)
{
+#ifdef CONFIG_ALTP2M
int rc;
if ( !hvm_funcs.altp2m_vcpu_emulate_vmfunc )
@@ -2713,6 +2714,9 @@ static int cf_check hvmemul_vmfunc(
x86_emul_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC, ctxt);
return rc;
+#else
+ return X86EMUL_UNHANDLEABLE;
+#endif
}
static const struct x86_emulate_ops hvm_emulate_ops = {
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index bd64faf207..646a0b682d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4611,6 +4611,7 @@ static int hvmop_get_param(
static int do_altp2m_op(
XEN_GUEST_HANDLE_PARAM(void) arg)
{
+#ifdef CONFIG_ALTP2M
struct xen_hvm_altp2m_op a;
struct domain *d = NULL;
int rc = 0;
@@ -4947,6 +4948,9 @@ static int do_altp2m_op(
rcu_unlock_domain(d);
return rc;
+#else /* !CONFIG_ALTP2M */
+ return -EOPNOTSUPP;
+#endif /* CONFIG_ALTP2M */
}
DEFINE_XEN_GUEST_HANDLE(compat_hvm_altp2m_op_t);
@@ -5238,8 +5242,12 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF:
v->arch.hvm.single_step = false;
+
+#ifdef CONFIG_ALTP2M
v->arch.hvm.fast_single_step.enabled = false;
v->arch.hvm.fast_single_step.p2midx = 0;
+#endif
+
break;
default: /* Excluded above */
@@ -5262,6 +5270,7 @@ void hvm_toggle_singlestep(struct vcpu *v)
v->arch.hvm.single_step = !v->arch.hvm.single_step;
}
+#ifdef CONFIG_ALTP2M
void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx)
{
ASSERT(atomic_read(&v->pause_count));
@@ -5276,6 +5285,7 @@ void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx)
v->arch.hvm.fast_single_step.enabled = true;
v->arch.hvm.fast_single_step.p2midx = p2midx;
}
+#endif
/*
* Segment caches in VMCB/VMCS are inconsistent about which bits are checked,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index f97a774653..9894a029ca 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2420,6 +2420,8 @@ static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t msr)
vmx_set_msr_intercept(v, msr, VMX_MSR_W);
}
+#ifdef CONFIG_ALTP2M
+
static void cf_check vmx_vcpu_update_eptp(struct vcpu *v)
{
struct domain *d = v->domain;
@@ -2539,6 +2541,8 @@ static bool cf_check vmx_vcpu_emulate_ve(struct vcpu *v)
return rc;
}
+#endif /* CONFIG_ALTP2M */
+
static bool cf_check vmx_get_pending_event(
struct vcpu *v, struct x86_event *info)
{
@@ -2867,10 +2871,12 @@ static struct hvm_function_table __initdata_cf_clobber vmx_function_table = {
.update_vlapic_mode = vmx_vlapic_msr_changed,
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
.enable_msr_interception = vmx_enable_msr_interception,
+#ifdef CONFIG_ALTP2M
.altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
.altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
.altp2m_vcpu_emulate_ve = vmx_vcpu_emulate_ve,
.altp2m_vcpu_emulate_vmfunc = vmx_vcpu_emulate_vmfunc,
+#endif
.vmtrace_control = vmtrace_control,
.vmtrace_output_position = vmtrace_output_position,
.vmtrace_set_option = vmtrace_set_option,
@@ -4967,6 +4973,7 @@ bool asmlinkage vmx_vmenter_helper(const struct cpu_user_regs *regs)
single = ept;
}
+#ifdef CONFIG_ALTP2M
if ( altp2m_active(currd) )
{
unsigned int i;
@@ -4985,6 +4992,7 @@ bool asmlinkage vmx_vmenter_helper(const struct cpu_user_regs *regs)
}
}
}
+#endif
if ( inv )
__invept(inv == 1 ? INVEPT_SINGLE_CONTEXT : INVEPT_ALL_CONTEXT,
diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index c31e74c6fa..ffcec69a15 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -351,12 +351,14 @@ struct arch_domain
struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
mm_lock_t nested_p2m_lock;
+#ifdef CONFIG_ALTP2M
/* altp2m: allow multiple copies of host p2m */
bool altp2m_active;
struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
mm_lock_t altp2m_list_lock;
uint64_t *altp2m_eptp;
uint64_t *altp2m_visible_eptp;
+#endif
#endif
/* NB. protected by d->event_lock and by irq_desc[irq].lock */
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index 5c5a790f9e..f02183691e 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -226,11 +226,13 @@ struct hvm_function_table {
void (*enable_msr_interception)(struct domain *d, uint32_t msr);
+#ifdef CONFIG_ALTP2M
/* Alternate p2m */
void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
bool (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
int (*altp2m_vcpu_emulate_vmfunc)(const struct cpu_user_regs *regs);
+#endif
/* vmtrace */
int (*vmtrace_control)(struct vcpu *v, bool enable, bool reset);
@@ -704,6 +706,7 @@ static inline bool hvm_nested_virt_supported(void)
return hvm_funcs.caps.nested_virt;
}
+#ifdef CONFIG_ALTP2M
/* updates the current hardware p2m */
static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
{
@@ -728,6 +731,9 @@ static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
}
return false;
}
+#else /* !CONFIG_ALTP2M */
+bool altp2m_vcpu_emulate_ve(struct vcpu *v);
+#endif /* CONFIG_ALTP2M */
static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset)
{
diff --git a/xen/arch/x86/include/asm/hvm/vcpu.h b/xen/arch/x86/include/asm/hvm/vcpu.h
index 196fed6d5d..924af890c5 100644
--- a/xen/arch/x86/include/asm/hvm/vcpu.h
+++ b/xen/arch/x86/include/asm/hvm/vcpu.h
@@ -133,10 +133,12 @@ struct hvm_vcpu {
bool flag_dr_dirty;
bool debug_state_latch;
bool single_step;
+#ifdef CONFIG_ALTP2M
struct {
bool enabled;
uint16_t p2midx;
} fast_single_step;
+#endif
/* (MFN) hypervisor page table */
pagetable_t monitor_table;
@@ -154,7 +156,9 @@ struct hvm_vcpu {
struct nestedvcpu nvcpu;
+#ifdef CONFIG_ALTP2M
struct altp2mvcpu avcpu;
+#endif
struct mtrr_state mtrr;
u64 pat_cr;
diff --git a/xen/arch/x86/include/asm/p2m.h b/xen/arch/x86/include/asm/p2m.h
index c53f4e487d..5ce0dc936f 100644
--- a/xen/arch/x86/include/asm/p2m.h
+++ b/xen/arch/x86/include/asm/p2m.h
@@ -889,6 +889,8 @@ void shadow_p2m_init(struct p2m_domain *p2m);
void cf_check nestedp2m_write_p2m_entry_post(
struct p2m_domain *p2m, unsigned int oflags);
+#ifdef CONFIG_ALTP2M
+
/*
* Alternate p2m: shadow p2m tables used for alternate memory views
*/
@@ -932,11 +934,6 @@ bool p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx);
/* Flush all the alternate p2m's for a domain */
void p2m_flush_altp2m(struct domain *d);
-/* Alternate p2m paging */
-bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
- mfn_t *mfn, p2m_type_t *p2mt,
- p2m_access_t *p2ma, unsigned int *page_order);
-
/* Make a specific alternate p2m valid */
int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx);
@@ -954,17 +951,29 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx);
int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
gfn_t old_gfn, gfn_t new_gfn);
-/* Propagate a host p2m change to all alternate p2m's */
-int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
- mfn_t mfn, unsigned int page_order,
- p2m_type_t p2mt, p2m_access_t p2ma);
-
/* Set a specific p2m view visibility */
int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int altp2m_idx,
uint8_t visible);
-#else /* !CONFIG_HVM */
+#else /* !CONFIG_ALTP2M */
struct p2m_domain *p2m_get_altp2m(struct vcpu *v);
+bool p2m_set_altp2m(struct vcpu *v, unsigned int idx);
+#endif /* CONFIG_ALTP2M */
+
+/*
+ * Common alternate p2m declarations that need to be visible
+ * regardless of CONFIG_ALTP2M
+ */
+
+/* Alternate p2m paging */
+bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
+ mfn_t *mfn, p2m_type_t *p2mt,
+ p2m_access_t *p2ma, unsigned int *page_order);
+
+/* Propagate a host p2m change to all alternate p2m's */
+int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
+ mfn_t mfn, unsigned int page_order,
+ p2m_type_t p2mt, p2m_access_t p2ma);
#endif /* CONFIG_HVM */
/* p2m access to IOMMU flags */
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index ec5043a8aa..384c24028f 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -494,6 +494,7 @@ int hap_enable(struct domain *d, u32 mode)
goto out;
}
+#ifdef CONFIG_ALTP2M
if ( hvm_altp2m_supported() )
{
/* Init alternate p2m data */
@@ -524,6 +525,7 @@ int hap_enable(struct domain *d, u32 mode)
d->arch.altp2m_active = false;
}
+#endif /* CONFIG_ALTP2M */
/* Now let other users see the new mode */
d->arch.paging.mode = mode | PG_HAP_enable;
@@ -537,9 +539,11 @@ void hap_final_teardown(struct domain *d)
{
unsigned int i;
+#ifdef CONFIG_ALTP2M
if ( hvm_altp2m_supported() )
for ( i = 0; i < MAX_ALTP2M; i++ )
p2m_teardown(d->arch.altp2m_p2m[i], true, NULL);
+#endif
/* Destroy nestedp2m's first */
for (i = 0; i < MAX_NESTEDP2M; i++) {
@@ -578,6 +582,7 @@ void hap_teardown(struct domain *d, bool *preempted)
for_each_vcpu ( d, v )
hap_vcpu_teardown(v);
+#ifdef CONFIG_ALTP2M
/* Leave the root pt in case we get further attempts to modify the p2m. */
if ( hvm_altp2m_supported() )
{
@@ -597,6 +602,7 @@ void hap_teardown(struct domain *d, bool *preempted)
return;
}
}
+#endif
/* Destroy nestedp2m's after altp2m. */
for ( i = 0; i < MAX_NESTEDP2M; i++ )
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 21b5b7ecda..0779c41161 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -347,6 +347,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
unsigned long gfn_l;
long rc = 0;
+#ifdef CONFIG_ALTP2M
/* altp2m view 0 is treated as the hostp2m */
if ( altp2m_idx )
{
@@ -357,6 +358,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
}
+#endif
if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
return -EINVAL;
@@ -403,6 +405,7 @@ long p2m_set_mem_access_multi(struct domain *d,
struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
long rc = 0;
+#ifdef CONFIG_ALTP2M
/* altp2m view 0 is treated as the hostp2m */
if ( altp2m_idx )
{
@@ -413,6 +416,7 @@ long p2m_set_mem_access_multi(struct domain *d,
ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
}
+#endif
p2m_lock(p2m);
if ( ap2m )
@@ -462,6 +466,7 @@ int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
+#ifdef CONFIG_ALTP2M
if ( !altp2m_active(d) )
{
if ( altp2m_idx )
@@ -476,6 +481,7 @@ int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
p2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
}
+#endif
return _p2m_get_mem_access(p2m, gfn, access);
}
@@ -486,6 +492,7 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required)
p2m_get_hostp2m(d)->access_required = access_required;
+#ifdef CONFIG_ALTP2M
if ( altp2m_active(d) )
{
unsigned int i;
@@ -497,6 +504,7 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required)
p2m->access_required = access_required;
}
}
+#endif
}
bool p2m_mem_access_sanity_check(const struct domain *d)
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 62fc8e5068..ae262bfcc2 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1297,6 +1297,7 @@ static void ept_set_ad_sync(struct domain *d, bool value)
hostp2m->ept.ad = value;
+#ifdef CONFIG_ALTP2M
if ( unlikely(altp2m_active(d)) )
{
unsigned int i;
@@ -1315,6 +1316,7 @@ static void ept_set_ad_sync(struct domain *d, bool value)
p2m_unlock(p2m);
}
}
+#endif
}
static void ept_enable_pml(struct p2m_domain *p2m)
@@ -1571,6 +1573,7 @@ void __init setup_ept_dump(void)
register_keyhandler('D', ept_dump_p2m_table, "dump VT-x EPT tables", 0);
}
+#ifdef CONFIG_ALTP2M
void p2m_init_altp2m_ept(struct domain *d, unsigned int i)
{
struct p2m_domain *p2m = array_access_nospec(d->arch.altp2m_p2m, i);
@@ -1610,6 +1613,7 @@ unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
altp2m_list_unlock(d);
return i;
}
+#endif /* CONFIG_ALTP2M */
/*
* Local variables:
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index b9a7c2dc53..5a3fda903e 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -101,6 +101,7 @@ void p2m_change_entry_type_global(struct domain *d,
change_entry_type_global(hostp2m, ot, nt);
+#ifdef CONFIG_ALTP2M
if ( unlikely(altp2m_active(d)) )
{
unsigned int i;
@@ -117,6 +118,7 @@ void p2m_change_entry_type_global(struct domain *d,
}
}
}
+#endif
p2m_unlock(hostp2m);
}
@@ -145,6 +147,7 @@ bool p2m_memory_type_changed(struct domain *d)
_memory_type_changed(hostp2m);
+#ifdef CONFIG_ALTP2M
if ( unlikely(altp2m_active(d)) )
{
unsigned int i;
@@ -161,6 +164,7 @@ bool p2m_memory_type_changed(struct domain *d)
}
}
}
+#endif
p2m_unlock(hostp2m);
@@ -930,6 +934,7 @@ void p2m_change_type_range(struct domain *d,
change_type_range(hostp2m, start, end, ot, nt);
+#ifdef CONFIG_ALTP2M
if ( unlikely(altp2m_active(d)) )
{
unsigned int i;
@@ -946,6 +951,8 @@ void p2m_change_type_range(struct domain *d,
}
}
}
+#endif
+
hostp2m->defer_nested_flush = false;
if ( nestedhvm_enabled(d) )
p2m_flush_nestedp2m(d);
@@ -1003,6 +1010,7 @@ int p2m_finish_type_change(struct domain *d,
if ( rc < 0 )
goto out;
+#ifdef CONFIG_ALTP2M
if ( unlikely(altp2m_active(d)) )
{
unsigned int i;
@@ -1022,6 +1030,7 @@ int p2m_finish_type_change(struct domain *d,
}
}
}
+#endif
out:
p2m_unlock(hostp2m);
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 0d15d363c3..fc349270b9 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -72,7 +72,7 @@ void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
if ( rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP )
hvm_toggle_singlestep(v);
- else
+ else if ( IS_ENABLED(CONFIG_ALTP2M) )
hvm_fast_singlestep(v, rsp->u.fast_singlestep.p2midx);
}
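Note that the vm_event.c hunk uses IS_ENABLED() rather than #ifdef: the
guarded call remains visible to the compiler and is type-checked, but sits
in a compile-time-false branch, so !CONFIG_ALTP2M builds need only a
declaration of hvm_fast_singlestep(). The hvm.h hunk relies on the same
trick when it declares altp2m_vcpu_emulate_ve() without a definition for
!CONFIG_ALTP2M builds. A freestanding sketch of the idea, using a plain 0/1
macro in place of Xen's IS_ENABLED() (illustrative names, not from the tree):

#define FEATURE_ENABLED 0           /* stand-in: imagine Kconfig said "n" */

void feature_helper(unsigned int idx);  /* declaration only; no definition
                                           is linked in this configuration */

void caller(unsigned int idx)
{
    if ( FEATURE_ENABLED )
        feature_helper(idx);        /* type-checked, then dropped by the
                                       compiler's constant folding, so the
                                       linker never sees the reference */
}

The trade-off versus #ifdef: the preprocessor removes code from the
translation unit entirely, while IS_ENABLED() keeps both configurations
compiling, which catches bit-rot in the disabled path.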
--
generated by git-patchbot for /home/xen/git/xen.git#staging