
[XEN][PATCH] x86/paging: replace !paging_mode_hap() with paging_mode_shadow()


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
  • Date: Fri, 14 Nov 2025 14:44:55 +0000
  • Accept-language: en-US
  • Cc: Grygorii Strashko <grygorii_strashko@xxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Jason Andryuk <jason.andryuk@xxxxxxx>
  • Delivery-date: Fri, 14 Nov 2025 14:45:07 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-topic: [XEN][PATCH] x86/paging: replace !paging_mode_hap() with paging_mode_shadow()

From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>

Xen now supports only two paging modes, HAP and SHADOW, so
!paging_mode_hap() effectively means paging_mode_shadow().
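
For context, a minimal sketch of how these checks are assumed to look in
asm/paging.h (bit tests on d->arch.paging.mode; names and values are
illustrative, not verbatim):

    /* Illustrative sketch only, not the verbatim Xen definitions. */
    #define PG_SH_enable   (1U << PG_SH_shift)   /* shadow paging in use */
    #define PG_HAP_enable  (1U << PG_HAP_shift)  /* HAP (EPT/NPT) in use */

    #define paging_mode_shadow(_d) (!!((_d)->arch.paging.mode & PG_SH_enable))
    #define paging_mode_hap(_d)    (!!((_d)->arch.paging.mode & PG_HAP_enable))

For an HVM domain one of the two modes is always in use, so at the touched
call sites !paging_mode_hap(d) and paging_mode_shadow(d) coincide.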

Even for the abstract, future case of a third paging mode being added, it
is better to explicitly name the paging mode actually being checked for
(SHADOW) than to rely on a negative check of another paging mode (HAP).

Hence, s/!paging_mode_hap()/paging_mode_shadow()/, which also allows DCE
to drop the now unreachable code when SHADOW_PAGING=n.
The !paging_mode_hap() checks in hap.c are left unchanged, as there the
HAP code is checking whether it itself is enabled.
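
The DCE aspect relies on the assumed fallback when shadow paging is
compiled out, along the lines of:

    /* Illustrative sketch, assuming the SHADOW_PAGING=n fallback. */
    #ifdef CONFIG_SHADOW_PAGING
    #define PG_SH_enable   (1U << PG_SH_shift)
    #else
    #define PG_SH_enable   0
    #endif

With PG_SH_enable being 0, paging_mode_shadow(d) folds to a compile-time
false and the guarded code can be dropped, whereas !paging_mode_hap(d)
always remains a runtime check.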

Inspired by [1].

[1] https://patchwork.kernel.org/project/xen-devel/patch/20251111200958.3576341-3-grygorii_strashko@xxxxxxxx/
Signed-off-by: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/vmce.c   |  2 +-
 xen/arch/x86/hvm/domain.c        |  2 +-
 xen/arch/x86/hvm/hvm.c           | 10 +++++-----
 xen/arch/x86/hvm/svm/nestedsvm.c |  2 +-
 xen/arch/x86/hvm/svm/svm.c       |  8 ++++----
 xen/arch/x86/hvm/vmx/vmcs.c      |  2 +-
 xen/arch/x86/hvm/vmx/vmx.c       |  4 ++--
 xen/arch/x86/mm/p2m-ept.c        |  2 +-
 8 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 5abdf4cb5fd5..1a7e92506ac8 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -525,7 +525,7 @@ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
     if ( !mfn_valid(mfn) )
         return -EINVAL;
 
-    if ( !is_hvm_domain(d) || !paging_mode_hap(d) )
+    if ( !is_hvm_domain(d) || paging_mode_shadow(d) )
         return -EOPNOTSUPP;
 
     rc = -1;
diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index 048f29ae4911..37092f31f3f7 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -287,7 +287,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const struct vcpu_hvm_context *ctx)
     hvm_update_guest_cr(v, 4);
     hvm_update_guest_efer(v);
 
-    if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
+    if ( hvm_paging_enabled(v) && paging_mode_shadow(v->domain) )
     {
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
         struct page_info *page = get_page_from_gfn(v->domain,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7299cfa90ad5..cb4924be9aff 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2317,7 +2317,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
             hvm_update_guest_efer(v);
         }
 
-        if ( !paging_mode_hap(d) )
+        if ( paging_mode_shadow(d) )
         {
             /* The guest CR3 must be pointing to the guest physical. */
             gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
@@ -2368,7 +2368,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
             hvm_update_guest_efer(v);
         }
 
-        if ( !paging_mode_hap(d) )
+        if ( paging_mode_shadow(d) )
         {
             put_page(pagetable_get_page(v->arch.guest_table));
             v->arch.guest_table = pagetable_null();
@@ -2422,7 +2422,7 @@ int hvm_set_cr3(unsigned long value, bool noflush, bool may_defer)
         }
     }
 
-    if ( hvm_paging_enabled(curr) && !paging_mode_hap(currd) &&
+    if ( hvm_paging_enabled(curr) && paging_mode_shadow(currd) &&
          ((value ^ curr->arch.hvm.guest_cr[3]) >> PAGE_SHIFT) )
     {
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
@@ -3966,7 +3966,7 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
     if ( v->is_initialised )
         goto out;
 
-    if ( !paging_mode_hap(d) )
+    if ( paging_mode_shadow(d) )
     {
         if ( v->arch.hvm.guest_cr[0] & X86_CR0_PG )
             put_page(pagetable_get_page(v->arch.guest_table));
@@ -4241,7 +4241,7 @@ static int hvm_set_param(struct domain *d, uint32_t index, uint64_t value)
          * Only actually required for VT-x lacking unrestricted_guest
          * capabilities.  Short circuit the pause if possible.
          */
-        if ( !paging_mode_hap(d) || !using_vmx() )
+        if ( paging_mode_shadow(d) || !using_vmx() )
         {
             d->arch.hvm.params[index] = value;
             break;
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index dc2b6a42534a..1813692ffb03 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1341,7 +1341,7 @@ nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs,
             /* l2 guest intercepts #PF unnecessarily */
             return NESTEDHVM_VMEXIT_INJECT;
         }
-        if ( !paging_mode_hap(v->domain) )
+        if ( paging_mode_shadow(v->domain) )
             /* host shadow paging + guest shadow paging */
             return NESTEDHVM_VMEXIT_HOST;
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b54f9d9af50e..73d6c7ed03f2 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -171,7 +171,7 @@ static void cf_check svm_update_guest_cr(
         }
 
         value = v->arch.hvm.guest_cr[0] | hw_cr0_mask;
-        if ( !paging_mode_hap(v->domain) )
+        if ( paging_mode_shadow(v->domain) )
             value |= X86_CR0_PG | X86_CR0_WP;
         vmcb_set_cr0(vmcb, value);
         break;
@@ -440,7 +440,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
         }
     }
 
-    if ( !paging_mode_hap(v->domain) )
+    if ( paging_mode_shadow(v->domain) )
     {
         if ( c->cr0 & X86_CR0_PG )
         {
@@ -762,7 +762,7 @@ static int cf_check svm_set_guest_pat(struct vcpu *v, u64 gpat)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
-    if ( !paging_mode_hap(v->domain) )
+    if ( paging_mode_shadow(v->domain) )
         return 0;
 
     vmcb_set_g_pat(vmcb, gpat);
@@ -773,7 +773,7 @@ static int cf_check svm_get_guest_pat(struct vcpu *v, u64 *gpat)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
-    if ( !paging_mode_hap(v->domain) )
+    if ( paging_mode_shadow(v->domain) )
         return 0;
 
     *gpat = vmcb_get_g_pat(vmcb);
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 2fffc2006ab0..0b511939ac8a 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -135,7 +135,7 @@ static int cf_check parse_ept_param_runtime(const char *s)
     for_each_domain ( d )
     {
         /* PV, or HVM Shadow domain?  Not applicable. */
-        if ( !paging_mode_hap(d) )
+        if ( paging_mode_shadow(d) )
             continue;
 
         /* Hardware domain? Not applicable. */
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 3f0e113b0b67..c98ec110d144 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1432,7 +1432,7 @@ static void cf_check vmx_set_segment_register(
 
 static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
 {
-    if ( !paging_mode_hap(v->domain) ||
+    if ( paging_mode_shadow(v->domain) ||
          unlikely(v->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) )
         return 0;
 
@@ -1444,7 +1444,7 @@ static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
 
 static int cf_check vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
 {
-    if ( !paging_mode_hap(v->domain) ||
+    if ( paging_mode_shadow(v->domain) ||
          unlikely(v->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) )
         return 0;
 
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index b854a08b4c4f..ce4ef632ae0a 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1270,7 +1270,7 @@ void ept_sync_domain(struct p2m_domain *p2m)
     struct domain *d = p2m->domain;
 
     /* Only if using EPT and this domain has some VCPUs to dirty. */
-    if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
+    if ( paging_mode_shadow(d) || !d->vcpu || !d->vcpu[0] )
         return;
 
     ept_sync_domain_prepare(p2m);
-- 
2.34.1



 

