[Xen-devel] [PATCH 3/7] x86/pagewalk: Helpers for reserved bit handling
Some bits are unconditionally reserved in pagetable entries, or reserved
because of alignment restrictions.  Other bits are reserved because of
control register configuration.

Introduce helpers which take an individual vcpu and guest pagetable entry,
and calculate whether any reserved bits are set.

While here, add a couple of newlines to aid readability, drop some trailing
whitespace, and bool/const correct the existing helpers to allow the new
helpers to take const vcpu pointers.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
---
 xen/include/asm-x86/guest_pt.h | 98 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 90 insertions(+), 8 deletions(-)

diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
index 0bf6cf9..1c3d384 100644
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -44,6 +44,18 @@ gfn_to_paddr(gfn_t gfn)
 #undef get_gfn
 #define get_gfn(d, g, t) get_gfn_type((d), gfn_x(g), (t), P2M_ALLOC)
 
+/* Mask covering the reserved bits from superpage alignment. */
+#define SUPERPAGE_RSVD(bit)                                             \
+    (((1ULL << (bit)) - 1) & ~(_PAGE_PSE_PAT | (_PAGE_PSE_PAT - 1)))
+
+static inline uint32_t fold_pse36(uint64_t val)
+{
+    return (val & ~(0x1ffULL << 13)) | ((val & (0x1ffULL << 32)) >> (32 - 13));
+}
+static inline uint64_t unfold_pse36(uint32_t val)
+{
+    return (val & ~(0x1ffULL << 13)) | ((val & (0x1ffULL << 13)) << (32 - 13));
+}
 
 /* Types of the guest's page tables and access functions for them */
 
@@ -51,9 +63,13 @@ gfn_to_paddr(gfn_t gfn)
 
 #define GUEST_L1_PAGETABLE_ENTRIES     1024
 #define GUEST_L2_PAGETABLE_ENTRIES     1024
+
 #define GUEST_L1_PAGETABLE_SHIFT         12
 #define GUEST_L2_PAGETABLE_SHIFT         22
 
+#define GUEST_L1_PAGETABLE_RSVD           0
+#define GUEST_L2_PAGETABLE_RSVD           0
+
 typedef uint32_t guest_intpte_t;
 typedef struct { guest_intpte_t l1; } guest_l1e_t;
 typedef struct { guest_intpte_t l2; } guest_l2e_t;
@@ -88,21 +104,39 @@ static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
 #else /* GUEST_PAGING_LEVELS != 2 */
 
 #if GUEST_PAGING_LEVELS == 3
+
 #define GUEST_L1_PAGETABLE_ENTRIES      512
 #define GUEST_L2_PAGETABLE_ENTRIES      512
 #define GUEST_L3_PAGETABLE_ENTRIES        4
+
 #define GUEST_L1_PAGETABLE_SHIFT         12
 #define GUEST_L2_PAGETABLE_SHIFT         21
 #define GUEST_L3_PAGETABLE_SHIFT         30
+
+#define GUEST_L1_PAGETABLE_RSVD           0x7ff0000000000000UL
+#define GUEST_L2_PAGETABLE_RSVD           0x7ff0000000000000UL
+#define GUEST_L3_PAGETABLE_RSVD                                         \
+    (0xfff0000000000000UL | _PAGE_GLOBAL | _PAGE_PSE | _PAGE_DIRTY |    \
+     _PAGE_ACCESSED | _PAGE_USER | _PAGE_RW)
+
 #else /* GUEST_PAGING_LEVELS == 4 */
+
 #define GUEST_L1_PAGETABLE_ENTRIES      512
 #define GUEST_L2_PAGETABLE_ENTRIES      512
 #define GUEST_L3_PAGETABLE_ENTRIES      512
 #define GUEST_L4_PAGETABLE_ENTRIES      512
+
 #define GUEST_L1_PAGETABLE_SHIFT         12
 #define GUEST_L2_PAGETABLE_SHIFT         21
 #define GUEST_L3_PAGETABLE_SHIFT         30
 #define GUEST_L4_PAGETABLE_SHIFT         39
+
+#define GUEST_L1_PAGETABLE_RSVD           0
+#define GUEST_L2_PAGETABLE_RSVD           0
+#define GUEST_L3_PAGETABLE_RSVD           0
+/* NB L4e._PAGE_GLOBAL is reserved for AMD, but ignored for Intel. */
+#define GUEST_L4_PAGETABLE_RSVD           _PAGE_PSE
+
 #endif
 
 typedef l1_pgentry_t guest_l1e_t;
@@ -170,27 +204,30 @@ static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
 
 /* Which pagetable features are supported on this vcpu? */
 
-static inline int
-guest_supports_superpages(struct vcpu *v)
+static inline bool guest_supports_superpages(const struct vcpu *v)
 {
     /* The _PAGE_PSE bit must be honoured in HVM guests, whenever
-     * CR4.PSE is set or the guest is in PAE or long mode. 
+     * CR4.PSE is set or the guest is in PAE or long mode.
      * It's also used in the dummy PT for vcpus with CR4.PG cleared. */
     return (is_pv_vcpu(v)
             ? opt_allow_superpage
-            : (GUEST_PAGING_LEVELS != 2 
+            : (GUEST_PAGING_LEVELS != 2
               || !hvm_paging_enabled(v)
              || (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PSE)));
 }
 
-static inline int
-guest_supports_1G_superpages(struct vcpu *v)
+static inline bool guest_has_pse36(const struct vcpu *v)
+{
+    /* No support for 2-level PV guests. */
+    return is_pv_vcpu(v) ? 0 : paging_mode_hap(v->domain);
+}
+
+static inline bool guest_supports_1G_superpages(const struct vcpu *v)
 {
     return (GUEST_PAGING_LEVELS >= 4 && hvm_pse1gb_supported(v->domain));
 }
 
-static inline int
-guest_supports_nx(struct vcpu *v)
+static inline bool guest_supports_nx(const struct vcpu *v)
 {
     if ( GUEST_PAGING_LEVELS == 2 || !cpu_has_nx )
         return 0;
@@ -213,6 +250,51 @@ guest_supports_nx(struct vcpu *v)
 #define _PAGE_INVALID_BITS _PAGE_INVALID_BIT
 #endif
 
+/* Helpers for identifying whether guest entries have reserved bits set. */
+
+/* Bits reserved because of maxphysaddr, and (lack of) EFER.NX */
+static inline uint64_t guest_rsvd_bits(const struct vcpu *v)
+{
+    return ((PADDR_MASK &
+             ~((1UL << v->domain->arch.cpuid->extd.maxphysaddr) - 1)) |
+            (guest_supports_nx(v) ? 0 : put_pte_flags(_PAGE_NX_BIT)));
+}
+
+static inline bool guest_l1e_rsvd_bits(const struct vcpu *v, guest_l1e_t l1e)
+{
+    return l1e.l1 & (guest_rsvd_bits(v) | GUEST_L1_PAGETABLE_RSVD);
+}
+
+static inline bool guest_l2e_rsvd_bits(const struct vcpu *v, guest_l2e_t l2e)
+{
+    uint64_t rsvd_bits = guest_rsvd_bits(v);
+
+    return ((l2e.l2 & (rsvd_bits | GUEST_L2_PAGETABLE_RSVD |
+                       (guest_supports_superpages(v) ? 0 : _PAGE_PSE))) ||
+            ((l2e.l2 & _PAGE_PSE) &&
+             (l2e.l2 & ((GUEST_PAGING_LEVELS == 2 && guest_has_pse36(v))
+                        ? (fold_pse36(rsvd_bits | (1ULL << 40)))
+                        : SUPERPAGE_RSVD(GUEST_L2_PAGETABLE_SHIFT)))));
+}
+
+#if GUEST_PAGING_LEVELS >= 3
+static inline bool guest_l3e_rsvd_bits(const struct vcpu *v, guest_l3e_t l3e)
+{
+    return ((l3e.l3 & (guest_rsvd_bits(v) | GUEST_L3_PAGETABLE_RSVD |
+                       (guest_supports_1G_superpages(v) ? 0 : _PAGE_PSE))) ||
+            ((l3e.l3 & _PAGE_PSE) &&
+             (l3e.l3 & SUPERPAGE_RSVD(GUEST_L3_PAGETABLE_SHIFT))));
+}
+
+#if GUEST_PAGING_LEVELS >= 4
+static inline bool guest_l4e_rsvd_bits(const struct vcpu *v, guest_l4e_t l4e)
+{
+    return l4e.l4 & (guest_rsvd_bits(v) | GUEST_L4_PAGETABLE_RSVD |
+                     ((v->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD)
+                      ? _PAGE_GLOBAL : 0));
+}
+#endif /* GUEST_PAGING_LEVELS >= 4 */
+#endif /* GUEST_PAGING_LEVELS >= 3 */
 
 /* Type used for recording a walk through guest pagetables.  It is
  * filled in by the pagetable walk function, and also used as a cache
-- 
2.1.4
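For anyone wanting to sanity-check the alignment and PSE36 arithmetic above in
isolation, below is a minimal standalone sketch (not part of the patch, and
deliberately free of Xen headers) of SUPERPAGE_RSVD() and the
fold_pse36()/unfold_pse36() helpers.  The _PAGE_PSE_PAT position (bit 12) is
the architectural PAT bit in superpage mappings; the address value used in
main() is purely illustrative.

/*
 * Standalone illustration only -- not Xen code.  Reproduces the
 * SUPERPAGE_RSVD() mask and the PSE36 fold/unfold arithmetic from the patch
 * so they can be checked with an ordinary host compiler.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_PSE_PAT 0x1000ULL  /* bit 12: PAT bit in superpage mappings */

/*
 * Alignment bits which must be zero in a 2^bit superpage mapping, excluding
 * bit 12 (PAT) and the ordinary flag bits below it.
 */
#define SUPERPAGE_RSVD(bit)                                             \
    (((1ULL << (bit)) - 1) & ~(_PAGE_PSE_PAT | (_PAGE_PSE_PAT - 1)))

/* PSE36 stores physical address bits 40:32 in PTE bits 21:13. */
static uint32_t fold_pse36(uint64_t val)
{
    return (val & ~(0x1ffULL << 13)) | ((val & (0x1ffULL << 32)) >> (32 - 13));
}

static uint64_t unfold_pse36(uint32_t val)
{
    return (val & ~(0x1ffULL << 13)) | ((val & (0x1ffULL << 13)) << (32 - 13));
}

int main(void)
{
    /* 2M superpage (shift 21): alignment reserves PTE bits 20:13. */
    assert(SUPERPAGE_RSVD(21) == 0x1fe000ULL);

    /* Illustrative PSE36 mapping with physical address bits 39:32 = 0x34. */
    uint64_t high_addr = 0x34ULL << 32;
    uint32_t folded = fold_pse36(high_addr);

    assert(folded == (0x34u << 13));           /* lands in PTE bits 20:13 */
    assert(unfold_pse36(folded) == high_addr); /* and round-trips back */

    printf("SUPERPAGE_RSVD(21) = %#llx, folded PSE36 bits = %#x\n",
           (unsigned long long)SUPERPAGE_RSVD(21), folded);
    return 0;
}

The assertions show why a 2MB superpage reserves only the alignment bits above
the PAT bit, and that folding the high physical address bits down into PTE
bits 21:13 (as guest_l2e_rsvd_bits() does with the reserved-bit mask) is a
clean round trip.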