[xen staging] x86/paging: replace !paging_mode_hap() with paging_mode_shadow()

commit dcca45531f708cec1207fa4a309ab10802edec83
Author: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
AuthorDate: Thu Nov 20 11:25:27 2025 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Nov 20 11:25:27 2025 +0100
x86/paging: replace !paging_mode_hap() with paging_mode_shadow()
Xen, for HVM/PVH, now supports only two paging modes: HAP and SHADOW, so
!paging_mode_hap() actually means paging_mode_shadow().

For the hypothetical future case of a 3rd paging mode being added, it is
also better to explicitly name the paging mode being checked (SHADOW) than
to use a negative check of another paging mode (HAP).

Hence, s/!paging_mode_hap()/paging_mode_shadow()/, which also allows DCE to
drop the unused code when SHADOW_PAGING=n.

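The equivalence and the DCE effect can be seen from the paging mode checks;
below is a simplified sketch along the lines of the asm/paging.h macros
(bit positions illustrative, not verbatim):

    /* Each enabled paging mode is a flag bit in d->arch.paging.mode. */
    #ifdef CONFIG_SHADOW_PAGING
    #define PG_SH_enable   (1U << 20)
    #else
    #define PG_SH_enable   0    /* shadow support compiled out */
    #endif
    #define PG_HAP_enable  (1U << 21)

    #define paging_mode_shadow(d) (!!((d)->arch.paging.mode & PG_SH_enable))
    #define paging_mode_hap(d)    (!!((d)->arch.paging.mode & PG_HAP_enable))

With SHADOW_PAGING=n, paging_mode_shadow(d) is thus a compile-time constant
0 and any block guarded by it can be discarded by the compiler, whereas
!paging_mode_hap(d) remains a runtime test and keeps such blocks alive.
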
The !paging_mode_hap() uses in hap.c are left alone, as there HAP checks
whether it itself is enabled.

Inspired by [1].
[1] https://patchwork.kernel.org/project/xen-devel/patch/20251111200958.3576341-3-grygorii_strashko@xxxxxxxx/
Signed-off-by: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
Reviewed-by: Jason Andryuk <jason.andryuk@xxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/vmce.c   |  2 +-
 xen/arch/x86/hvm/domain.c        |  2 +-
 xen/arch/x86/hvm/hvm.c           | 10 +++++-----
 xen/arch/x86/hvm/svm/nestedsvm.c |  2 +-
 xen/arch/x86/hvm/svm/svm.c       |  8 ++++----
 xen/arch/x86/hvm/vmx/vmcs.c      |  2 +-
 xen/arch/x86/hvm/vmx/vmx.c       |  4 ++--
 xen/arch/x86/mm/p2m-ept.c        |  2 +-
 8 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 5abdf4cb5f..1a7e92506a 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -525,7 +525,7 @@ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
     if ( !mfn_valid(mfn) )
         return -EINVAL;
 
-    if ( !is_hvm_domain(d) || !paging_mode_hap(d) )
+    if ( !is_hvm_domain(d) || paging_mode_shadow(d) )
         return -EOPNOTSUPP;
 
     rc = -1;
diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index 048f29ae49..37092f31f3 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -287,7 +287,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const struct vcpu_hvm_context *ctx)
     hvm_update_guest_cr(v, 4);
     hvm_update_guest_efer(v);
 
-    if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
+    if ( hvm_paging_enabled(v) && paging_mode_shadow(v->domain) )
     {
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
         struct page_info *page = get_page_from_gfn(v->domain,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7b9fc3ac16..4a8e339f97 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2317,7 +2317,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
             hvm_update_guest_efer(v);
         }
 
-        if ( !paging_mode_hap(d) )
+        if ( paging_mode_shadow(d) )
         {
             /* The guest CR3 must be pointing to the guest physical. */
             gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
@@ -2368,7 +2368,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
             hvm_update_guest_efer(v);
         }
 
-        if ( !paging_mode_hap(d) )
+        if ( paging_mode_shadow(d) )
         {
             put_page(pagetable_get_page(v->arch.guest_table));
             v->arch.guest_table = pagetable_null();
@@ -2422,7 +2422,7 @@ int hvm_set_cr3(unsigned long value, bool noflush, bool may_defer)
         }
     }
 
-    if ( hvm_paging_enabled(curr) && !paging_mode_hap(currd) &&
+    if ( hvm_paging_enabled(curr) && paging_mode_shadow(currd) &&
          ((value ^ curr->arch.hvm.guest_cr[3]) >> PAGE_SHIFT) )
     {
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
@@ -3966,7 +3966,7 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
     if ( v->is_initialised )
         goto out;
 
-    if ( !paging_mode_hap(d) )
+    if ( paging_mode_shadow(d) )
    {
         if ( v->arch.hvm.guest_cr[0] & X86_CR0_PG )
             put_page(pagetable_get_page(v->arch.guest_table));
@@ -4240,7 +4240,7 @@ static int hvm_set_param(struct domain *d, uint32_t index, uint64_t value)
          * Only actually required for VT-x lacking unrestricted_guest
          * capabilities. Short circuit the pause if possible.
          */
-        if ( !paging_mode_hap(d) || !using_vmx() )
+        if ( paging_mode_shadow(d) || !using_vmx() )
         {
             d->arch.hvm.params[index] = value;
             break;
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index dc2b6a4253..1813692ffb 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1341,7 +1341,7 @@ nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs,
             /* l2 guest intercepts #PF unnecessarily */
             return NESTEDHVM_VMEXIT_INJECT;
         }
-        if ( !paging_mode_hap(v->domain) )
+        if ( paging_mode_shadow(v->domain) )
             /* host shadow paging + guest shadow paging */
             return NESTEDHVM_VMEXIT_HOST;
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 5b2b56edf8..2d7c598ffe 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -171,7 +171,7 @@ static void cf_check svm_update_guest_cr(
         }
 
         value = v->arch.hvm.guest_cr[0] | hw_cr0_mask;
-        if ( !paging_mode_hap(v->domain) )
+        if ( paging_mode_shadow(v->domain) )
             value |= X86_CR0_PG | X86_CR0_WP;
         vmcb_set_cr0(vmcb, value);
         break;
@@ -440,7 +440,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
         }
     }
 
-    if ( !paging_mode_hap(v->domain) )
+    if ( paging_mode_shadow(v->domain) )
    {
         if ( c->cr0 & X86_CR0_PG )
         {
@@ -762,7 +762,7 @@ static int cf_check svm_set_guest_pat(struct vcpu *v, u64 gpat)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
-    if ( !paging_mode_hap(v->domain) )
+    if ( paging_mode_shadow(v->domain) )
         return 0;
 
     vmcb_set_g_pat(vmcb, gpat);
@@ -773,7 +773,7 @@ static int cf_check svm_get_guest_pat(struct vcpu *v, u64 *gpat)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
-    if ( !paging_mode_hap(v->domain) )
+    if ( paging_mode_shadow(v->domain) )
         return 0;
 
     *gpat = vmcb_get_g_pat(vmcb);
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index cd5ac8a5f0..d610988bf9 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -135,7 +135,7 @@ static int cf_check parse_ept_param_runtime(const char *s)
     for_each_domain ( d )
     {
         /* PV, or HVM Shadow domain? Not applicable. */
-        if ( !paging_mode_hap(d) )
+        if ( paging_mode_shadow(d) )
             continue;
 
         /* Hardware domain? Not applicable. */
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 4ce213284f..6b407226c4 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1426,7 +1426,7 @@ static void cf_check vmx_set_segment_register(
 
 static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
 {
-    if ( !paging_mode_hap(v->domain) ||
+    if ( paging_mode_shadow(v->domain) ||
          unlikely(v->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) )
         return 0;
 
@@ -1438,7 +1438,7 @@ static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
 
 static int cf_check vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
 {
-    if ( !paging_mode_hap(v->domain) ||
+    if ( paging_mode_shadow(v->domain) ||
          unlikely(v->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) )
         return 0;
 
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index b854a08b4c..ce4ef632ae 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1270,7 +1270,7 @@ void ept_sync_domain(struct p2m_domain *p2m)
     struct domain *d = p2m->domain;
 
     /* Only if using EPT and this domain has some VCPUs to dirty. */
-    if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
+    if ( paging_mode_shadow(d) || !d->vcpu || !d->vcpu[0] )
         return;
 
     ept_sync_domain_prepare(p2m);
--
generated by git-patchbot for /home/xen/git/xen.git#staging