
[Xen-devel] [PATCH v3 1/6] x86/pagewalk: Clean up guest_supports_* predicates



Switch them to returning bool, and taking const parameters.

Rename guest_supports_superpages() to guest_can_use_l2_superpages(), both to
indicate which level of pagetable it actually refers to and to indicate that
the answer depends on more than just control register settings.  Rename
guest_supports_1G_superpages() to guest_can_use_l3_superpages() for
consistency.

guest_can_use_l3_superpages() is a static property of the domain, rather than
of control register settings, so it is switched to take a domain pointer.
hvm_pse1gb_supported() is inlined into its sole user, because it isn't
strictly hvm-specific (it is hap-specific) and really should sit beside a
comment explaining why the cpuid policy is ignored.

guest_supports_nx(), on the other hand, refers simply to a control register
bit, and is renamed to guest_nx_enabled().

While cleaning up this part of the file, also clean up all trailing
whitespace, and fix one comment which accidentally referred to PG living in
CR4 rather than CR0.

Requested-by: Jan Beulich <jbeulich@xxxxxxxx>
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>

v3:
 * guest_can_use_...()
 * Adjust comments to distinguish shadow and hap behaviour

v2:
 * New
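
Reviewer aid, not to be applied: a minimal sketch of how the three renamed
predicates are intended to be used.  The helper below is hypothetical and
simply mirrors the call sites changed in guest_walk.c further down, assuming
the existing Xen types and flag macros:

  /* Hypothetical helper, for illustration only. */
  static void example_predicate_usage(const struct vcpu *v, uint32_t gflags,
                                      uint32_t *pfec, uint32_t *rc, bool *pse2M)
  {
      const struct domain *d = v->domain;

      /* NX is per-vcpu control register state (cf. mandatory_flags()). */
      if ( !guest_nx_enabled(v) )
          *pfec &= ~PFEC_insn_fetch;

      /* 1GB superpages are a static, per-domain property (L3 handling). */
      if ( !guest_can_use_l3_superpages(d) )
          *rc |= _PAGE_PSE | _PAGE_INVALID_BIT;

      /* 2MB superpages depend on the vcpu's paging mode (L2 handling). */
      *pse2M = (gflags & _PAGE_PSE) && guest_can_use_l2_superpages(v);
  }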
---
 xen/arch/x86/mm/guest_walk.c   |  6 ++--
 xen/arch/x86/mm/shadow/multi.c | 12 +++----
 xen/include/asm-x86/guest_pt.h | 74 ++++++++++++++++++++++++------------------
 xen/include/asm-x86/hvm/hvm.h  |  4 ---
 4 files changed, 52 insertions(+), 44 deletions(-)

diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 8187226..c526363 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -59,7 +59,7 @@ const uint32_t gw_page_flags[] = {
 static uint32_t mandatory_flags(struct vcpu *v, uint32_t pfec) 
 {
     /* Don't demand not-NX if the CPU wouldn't enforce it. */
-    if ( !guest_supports_nx(v) )
+    if ( !guest_nx_enabled(v) )
         pfec &= ~PFEC_insn_fetch;
 
     /* Don't demand R/W if the CPU wouldn't enforce it. */
@@ -272,7 +272,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
             /* _PAGE_PSE_PAT not set: remove _PAGE_PAT from flags. */
             flags &= ~_PAGE_PAT;
 
-        if ( !guest_supports_1G_superpages(v) )
+        if ( !guest_can_use_l3_superpages(d) )
             rc |= _PAGE_PSE | _PAGE_INVALID_BIT;
         if ( gfn_x(start) & GUEST_L3_GFN_MASK & ~0x1 )
             rc |= _PAGE_INVALID_BITS;
@@ -326,7 +326,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     }
     rc |= ((gflags & mflags) ^ mflags);
 
-    pse2M = (gflags & _PAGE_PSE) && guest_supports_superpages(v); 
+    pse2M = (gflags & _PAGE_PSE) && guest_can_use_l2_superpages(v);
 
     if ( pse2M )
     {
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 63c7ab5..95e2f85 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -238,7 +238,7 @@ shadow_check_gwalk(struct vcpu *v, unsigned long va, walk_t *gw, int version)
     l2p = (guest_l2e_t *)v->arch.paging.shadow.guest_vtable;
     mismatch |= (gw->l2e.l2 != l2p[guest_l2_table_offset(va)].l2);
 #endif
-    if ( !(guest_supports_superpages(v) &&
+    if ( !(guest_can_use_l2_superpages(v) &&
            (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)) )
     {
         l1p = map_domain_page(gw->l1mfn);
@@ -310,7 +310,7 @@ gw_remove_write_accesses(struct vcpu *v, unsigned long va, walk_t *gw)
         rc |= GW_RMWR_FLUSHTLB;
 #endif /* GUEST_PAGING_LEVELS >= 3 */
 
-    if ( !(guest_supports_superpages(v) &&
+    if ( !(guest_can_use_l2_superpages(v) &&
            (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE))
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
          && !mfn_is_out_of_sync(gw->l1mfn)
@@ -591,7 +591,7 @@ _sh_propagate(struct vcpu *v,
     //
     pass_thru_flags = (_PAGE_ACCESSED | _PAGE_USER |
                        _PAGE_RW | _PAGE_PRESENT);
-    if ( guest_supports_nx(v) )
+    if ( guest_nx_enabled(v) )
         pass_thru_flags |= _PAGE_NX_BIT;
     if ( level == 1 && !shadow_mode_refcounts(d) && mmio_mfn )
         pass_thru_flags |= _PAGE_PAT | _PAGE_PCD | _PAGE_PWT;
@@ -660,7 +660,7 @@ _sh_propagate(struct vcpu *v,
     if ( unlikely(((level == 1) ||
                    ((level == 2) &&
                     (gflags & _PAGE_PSE) &&
-                    guest_supports_superpages(v)))
+                    guest_can_use_l2_superpages(v)))
                   && !(gflags & _PAGE_DIRTY)) )
         sflags &= ~_PAGE_RW;
 
@@ -1846,7 +1846,7 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
         /* No l1 shadow installed: find and install it. */
         if ( !(flags & _PAGE_PRESENT) )
             return NULL; /* No guest page. */
-        if ( guest_supports_superpages(v) && (flags & _PAGE_PSE) )
+        if ( guest_can_use_l2_superpages(v) && (flags & _PAGE_PSE) )
         {
             /* Splintering a superpage */
             gfn_t l2gfn = guest_l2e_get_gfn(gw->l2e);
@@ -2251,7 +2251,7 @@ static int validate_gl2e(struct vcpu *v, void *new_ge, mfn_t sl2mfn, void *se)
     if ( guest_l2e_get_flags(new_gl2e) & _PAGE_PRESENT )
     {
         gfn_t gl1gfn = guest_l2e_get_gfn(new_gl2e);
-        if ( guest_supports_superpages(v) &&
+        if ( guest_can_use_l2_superpages(v) &&
              (guest_l2e_get_flags(new_gl2e) & _PAGE_PSE) )
         {
             // superpage -- need to look up the shadow L1 which holds the
diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
index bedc771..e23e968 100644
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -2,7 +2,7 @@
  * xen/asm-x86/guest_pt.h
  *
  * Types and accessors for guest pagetable entries, as distinct from
- * Xen's pagetable types. 
+ * Xen's pagetable types.
  *
  * Users must #define GUEST_PAGING_LEVELS to 2, 3 or 4 before including
  * this file.
@@ -10,17 +10,17 @@
  * Parts of this code are Copyright (c) 2006 by XenSource Inc.
  * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
  * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
@@ -168,33 +168,43 @@ static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
 
 /* Which pagetable features are supported on this vcpu? */
 
-static inline int
-guest_supports_superpages(struct vcpu *v)
+static inline bool guest_can_use_l2_superpages(const struct vcpu *v)
 {
-    /* The _PAGE_PSE bit must be honoured in HVM guests, whenever
-     * CR4.PSE is set or the guest is in PAE or long mode. 
-     * It's also used in the dummy PT for vcpus with CR4.PG cleared. */
+    /*
+     * The L2 _PAGE_PSE bit must be honoured in HVM guests, whenever
+     * CR4.PSE is set or the guest is in PAE or long mode.
+     * It's also used in the dummy PT for vcpus with CR0.PG cleared.
+     */
     return (is_pv_vcpu(v)
             ? opt_allow_superpage
-            : (GUEST_PAGING_LEVELS != 2 
+            : (GUEST_PAGING_LEVELS != 2
                || !hvm_paging_enabled(v)
                || (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PSE)));
 }
 
-static inline int
-guest_supports_1G_superpages(struct vcpu *v)
+static inline bool guest_can_use_l3_superpages(const struct domain *d)
 {
-    return (GUEST_PAGING_LEVELS >= 4 && hvm_pse1gb_supported(v->domain));
+    /*
+     * There are no control register settings for the hardware pagewalk on the
+     * subject of 1G superpages.
+     *
+     * Shadow pagetables don't support 1GB superpages at all, and will always
+     * treat L3 _PAGE_PSE as reserved.
+     *
+     * With HAP however, if the guest constructs a 1GB superpage on capable
+     * hardware, it will function irrespective of whether the feature is
+     * advertised.  Xen's model of performing a pagewalk should match.
+     */
+    return GUEST_PAGING_LEVELS >= 4 && paging_mode_hap(d) && cpu_has_page1gb;
 }
 
-static inline int
-guest_supports_nx(struct vcpu *v)
+static inline bool guest_nx_enabled(const struct vcpu *v)
 {
-    if ( GUEST_PAGING_LEVELS == 2 || !cpu_has_nx )
-        return 0;
-    if ( is_pv_vcpu(v) )
-        return cpu_has_nx;
-    return hvm_nx_enabled(v);
+    if ( GUEST_PAGING_LEVELS == 2 ) /* NX has no effect without CR4.PAE. */
+        return false;
+
+    /* PV guests can't control EFER.NX, and inherit Xen's choice. */
+    return is_pv_vcpu(v) ? cpu_has_nx : hvm_nx_enabled(v);
 }
 
 
@@ -258,11 +268,11 @@ static inline paddr_t guest_walk_to_gpa(const walk_t *gw)
     return (gfn_x(gfn) << PAGE_SHIFT) | (gw->va & ~PAGE_MASK);
 }
 
-/* Given a walk_t from a successful walk, return the page-order of the 
+/* Given a walk_t from a successful walk, return the page-order of the
  * page or superpage that the virtual address is in. */
 static inline unsigned int guest_walk_to_page_order(const walk_t *gw)
 {
-    /* This is only valid for successful walks - otherwise the 
+    /* This is only valid for successful walks - otherwise the
      * PSE bits might be invalid. */
     ASSERT(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT);
 #if GUEST_PAGING_LEVELS >= 3
@@ -275,28 +285,30 @@ static inline unsigned int guest_walk_to_page_order(const walk_t *gw)
 }
 
 
-/* Walk the guest pagetables, after the manner of a hardware walker. 
+/*
+ * Walk the guest pagetables, after the manner of a hardware walker.
  *
- * Inputs: a vcpu, a virtual address, a walk_t to fill, a 
- *         pointer to a pagefault code, the MFN of the guest's 
- *         top-level pagetable, and a mapping of the 
+ * Inputs: a vcpu, a virtual address, a walk_t to fill, a
+ *         pointer to a pagefault code, the MFN of the guest's
+ *         top-level pagetable, and a mapping of the
  *         guest's top-level pagetable.
- * 
+ *
  * We walk the vcpu's guest pagetables, filling the walk_t with what we
  * see and adding any Accessed and Dirty bits that are needed in the
  * guest entries.  Using the pagefault code, we check the permissions as
  * we go.  For the purposes of reading pagetables we treat all non-RAM
  * memory as contining zeroes.
- * 
- * Returns 0 for success, or the set of permission bits that we failed on 
- * if the walk did not complete. */
+ *
+ * Returns 0 for success, or the set of permission bits that we failed on
+ * if the walk did not complete.
+ */
 
 /* Macro-fu so you can call guest_walk_tables() and get the right one. */
 #define GPT_RENAME2(_n, _l) _n ## _ ## _l ## _levels
 #define GPT_RENAME(_n, _l) GPT_RENAME2(_n, _l)
 #define guest_walk_tables GPT_RENAME(guest_walk_tables, GUEST_PAGING_LEVELS)
 
-extern uint32_t 
+extern uint32_t
 guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, unsigned long va,
                   walk_t *gw, uint32_t pfec, mfn_t top_mfn, void *top_map);
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index f9bb190..c854183 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -302,10 +302,6 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
 #define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB))
 #define hap_has_2mb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB))
 
-/* Can the guest use 1GB superpages in its own pagetables? */
-#define hvm_pse1gb_supported(d) \
-    (cpu_has_page1gb && paging_mode_hap(d))
-
 #define hvm_long_mode_enabled(v) \
     ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
 
-- 
2.1.4

