
[Xen-changelog] [xen master] x86: limit GFNs to 32 bits for shadowed superpages.



commit 8b17648339ba801c4c7937b5f13dd25068e54e60
Author:     Tim Deegan <tim@xxxxxxx>
AuthorDate: Mon Mar 14 11:05:48 2016 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Apr 20 18:01:01 2016 +0100

    x86: limit GFNs to 32 bits for shadowed superpages.
    
    Superpage shadows store the shadowed GFN in the backpointer field,
    which for non-BIGMEM builds is 32 bits wide.  Shadowing a superpage
    mapping of a guest-physical address above 2^44 would lead to the GFN
    being truncated there, and a crash when we come to remove the shadow
    from the hash table.
    
    Track the valid width of a GFN for each guest, including reporting it
    through CPUID, and enforce it in the shadow pagetables.  Set the
    maximum width to 32 for guests where this truncation could occur.
    
    This is XSA-173.
    
    Reported-by: Ling Liu <liuling-it@xxxxxx>
    Signed-off-by: Tim Deegan <tim@xxxxxxx>
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/cpu/common.c         |  8 ++++++--
 xen/arch/x86/hvm/hvm.c            |  3 +--
 xen/arch/x86/mm/guest_walk.c      | 20 +++++++-------------
 xen/arch/x86/mm/hap/hap.c         |  2 ++
 xen/arch/x86/mm/p2m.c             |  6 ++++++
 xen/arch/x86/mm/shadow/common.c   | 10 ++++++++++
 xen/arch/x86/mm/shadow/multi.c    |  3 ++-
 xen/include/asm-x86/domain.h      |  3 +++
 xen/include/asm-x86/guest_pt.h    | 18 ++++++++++--------
 xen/include/asm-x86/processor.h   |  2 ++
 xen/include/asm-x86/x86_64/page.h |  6 ++++++
 11 files changed, 55 insertions(+), 26 deletions(-)
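
As context for the diff below: the failure mode is plain integer
truncation.  A minimal standalone sketch (illustrative only, not Xen
code; the 2^44 figure is a 32-bit GFN field plus the 12-bit page
offset):

    /* Illustrative sketch of the truncation, not Xen code. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* A guest-physical address of 2^44 yields a GFN of
         * 2^44 >> 12 == 2^32, one bit too wide for the 32-bit
         * backpointer field in non-BIGMEM builds. */
        uint64_t gfn = (1ULL << 44) >> 12;
        uint32_t backpointer = (uint32_t)gfn;   /* truncates to 0 */

        printf("gfn=%#llx stored=%#x\n",
               (unsigned long long)gfn, backpointer);
        /* A later hash lookup keyed on the truncated value cannot
         * find the shadow inserted under the full-width GFN. */
        return 0;
    }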

diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index fe6eab4..f664341 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -45,6 +45,7 @@ struct cpuidmasks __read_mostly cpuidmask_defaults;
 const struct cpu_dev *__read_mostly cpu_devs[X86_VENDOR_NUM] = {};
 
 unsigned int paddr_bits __read_mostly = 36;
+unsigned int hap_paddr_bits __read_mostly = 36;
 
 /*
  * Default host IA32_CR_PAT value to cover all memory types.
@@ -236,8 +237,11 @@ static void __init early_cpu_detect(void)
        c->x86_capability[cpufeat_word(X86_FEATURE_FPU)] = edx;
        c->x86_capability[cpufeat_word(X86_FEATURE_SSE3)] = ecx;
 
-       if ( cpuid_eax(0x80000000) >= 0x80000008 )
-               paddr_bits = cpuid_eax(0x80000008) & 0xff;
+       if ( cpuid_eax(0x80000000) >= 0x80000008 ) {
+               eax = cpuid_eax(0x80000008);
+               paddr_bits = eax & 0xff;
+               hap_paddr_bits = ((eax >> 16) & 0xff) ?: paddr_bits;
+       }
 }
 
 static void generic_identify(struct cpuinfo_x86 *c)
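
The hunk above decodes more of CPUID leaf 0x80000008's EAX than
before.  A fragment sketching the layout it relies on (per the
published definition of the leaf; phys_bits/guest_bits are
descriptive names, not Xen identifiers):

    /* eax[ 7: 0] = physical address width
     * eax[15: 8] = linear address width
     * eax[23:16] = guest physical address width under nested
     *              paging; 0 means "same as eax[7:0]"        */
    unsigned int eax = cpuid_eax(0x80000008);
    unsigned int phys_bits  = eax & 0xff;
    unsigned int guest_bits = (eax >> 16) & 0xff;
    if ( !guest_bits )
        guest_bits = phys_bits;  /* what the GNU "?:" above expands to */
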
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f24126d..e9d4c6b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3504,8 +3504,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
         break;
 
     case 0x80000008:
-        count = cpuid_eax(0x80000008);
-        count = (count >> 16) & 0xff ?: count & 0xff;
+        count = d->arch.paging.gfn_bits + PAGE_SHIFT;
         if ( (*eax & 0xff) > count )
             *eax = (*eax & ~0xff) | count;
 
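A worked instance of the clamp above (values assumed): a shadow-mode
guest with gfn_bits == 32 on a host reporting 48 physical address
bits.

    unsigned int count = 32 + 12;      /* gfn_bits + PAGE_SHIFT == 44 */
    unsigned int eax = 48;             /* low byte: host MAXPHYADDR   */
    if ( (eax & 0xff) > count )
        eax = (eax & ~0xff) | count;   /* guest now sees 44 bits      */
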
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 21cabba..625ac4d 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -342,20 +342,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
             flags &= ~_PAGE_PAT;
 
         if ( gfn_x(start) & GUEST_L2_GFN_MASK & ~0x1 )
-        {
-#if GUEST_PAGING_LEVELS == 2
-            /*
-             * Note that _PAGE_INVALID_BITS is zero in this case, yielding a
-             * no-op here.
-             *
-             * Architecturally, the walk should fail if bit 21 is set (others
-             * aren't being checked at least in PSE36 mode), but we'll ignore
-             * this here in order to avoid specifying a non-natural, non-zero
-             * _PAGE_INVALID_BITS value just for that case.
-             */
-#endif
             rc |= _PAGE_INVALID_BITS;
-        }
+
         /* Increment the pfn by the right number of 4k pages.  
          * Mask out PAT and invalid bits. */
         start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) +
@@ -441,5 +429,11 @@ set_ad:
         put_page(mfn_to_page(mfn_x(gw->l1mfn)));
     }
 
+    /* If this guest has a restricted physical address space then the
+     * target GFN must fit within it. */
+    if ( !(rc & _PAGE_PRESENT)
+         && gfn_x(guest_l1e_get_gfn(gw->l1e)) >> d->arch.paging.gfn_bits )
+        rc |= _PAGE_INVALID_BITS;
+
     return rc;
 }
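
The range test added at the end of the walk uses the usual shift
idiom: a GFN shifted right by the valid width is non-zero exactly
when it has bits beyond that width.  A self-contained check,
assuming gfn_bits == 32:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int gfn_bits = 32;
        uint64_t gfn_ok  = 0xffffffffULL;  /* fits in 32 bits */
        uint64_t gfn_bad = 1ULL << 32;     /* needs 33 bits   */

        assert((gfn_ok  >> gfn_bits) == 0);  /* walk proceeds        */
        assert((gfn_bad >> gfn_bits) != 0);  /* _PAGE_INVALID_BITS set */
        return 0;
    }
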
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index f173539..4ab99bb 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -448,6 +448,8 @@ void hap_domain_init(struct domain *d)
 {
     INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
 
+    d->arch.paging.gfn_bits = hap_paddr_bits - PAGE_SHIFT;
+
     /* Use HAP logdirty mechanism. */
     paging_log_dirty_init(d, hap_enable_log_dirty,
                           hap_disable_log_dirty,
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index b3fce1b..9e82256 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2081,6 +2081,12 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
 {
     struct page_info *page;
 
+    if ( gfn_x(gfn) >> p2m->domain->arch.paging.gfn_bits )
+    {
+        *rc = _PAGE_INVALID_BIT;
+        return NULL;
+    }
+
     /* Translate the gfn, unsharing if shared. */
     page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), p2mt, NULL, q);
     if ( p2m_is_paging(*p2mt) )
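
Callers of map_domain_gfn() observe the new failure as a NULL return
with the invalid bit set in *rc.  A hedged caller-side fragment (the
trailing arguments are assumed from the wrapped prototype above):

    uint32_t rc = 0;
    void *p = map_domain_gfn(p2m, gfn, &mfn, &p2mt, q, &rc);
    if ( !p && (rc & _PAGE_INVALID_BIT) )
        /* GFN exceeded d->arch.paging.gfn_bits; fail the walk
         * instead of dereferencing a truncated frame number. */;
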
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index ec87fb4..4e06c13 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -51,6 +51,16 @@ int shadow_domain_init(struct domain *d, unsigned int domcr_flags)
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
 
+    d->arch.paging.gfn_bits = paddr_bits - PAGE_SHIFT;
+#ifndef CONFIG_BIGMEM
+    /*
+     * Shadowed superpages store GFNs in 32-bit page_info fields.
+     * Note that we cannot use guest_supports_superpages() here.
+     */
+    if ( !is_pv_domain(d) || opt_allow_superpage )
+        d->arch.paging.gfn_bits = 32;
+#endif
+
     /* Use shadow pagetables for log-dirty support */
     paging_log_dirty_init(d, sh_enable_log_dirty,
                           sh_disable_log_dirty, sh_clean_dirty_bitmap);
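
The condition in the hunk above encodes a small policy: an annotated
restatement (non-BIGMEM builds only; 32 GFN bits plus the 12-bit
page offset give the 2^44 guest-physical ceiling):

    /* Non-PV guests can always create superpage mappings, and PV
     * guests can only have superpages shadowed when the
     * "allowsuperpage" option was given, so exactly those cases
     * must fit the 32-bit backpointer field. */
    if ( !is_pv_domain(d) ||           /* HVM: always limited    */
         opt_allow_superpage )         /* PV: only if enabled    */
        d->arch.paging.gfn_bits = 32;  /* 32 + 12 => 2^44 limit  */
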
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index e5c8499..428be37 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -540,7 +540,8 @@ _sh_propagate(struct vcpu *v,
     ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
 
     /* Check there's something for the shadows to map to */
-    if ( !p2m_is_valid(p2mt) && !p2m_is_grant(p2mt) )
+    if ( (!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt))
+         || gfn_x(target_gfn) >> d->arch.paging.gfn_bits )
     {
         *sp = shadow_l1e_empty();
         goto done;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index d393ed2..165e533 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -194,6 +194,9 @@ struct paging_domain {
     /* log dirty support */
     struct log_dirty_domain log_dirty;
 
+    /* Number of valid bits in a gfn. */
+    unsigned int gfn_bits;
+
     /* preemption handling */
     struct {
         const struct domain *dom;
diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
index eb29e62..1f6c2ae 100644
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -222,15 +222,17 @@ guest_supports_nx(struct vcpu *v)
 }
 
 
-/* Some bits are invalid in any pagetable entry. */
-#if GUEST_PAGING_LEVELS == 2
-#define _PAGE_INVALID_BITS (0)
-#elif GUEST_PAGING_LEVELS == 3
-#define _PAGE_INVALID_BITS \
-    get_pte_flags(((1ull<<63) - 1) & ~((1ull<<paddr_bits) - 1))
-#else /* GUEST_PAGING_LEVELS == 4 */
+/*
+ * Some bits are invalid in any pagetable entry.
+ * Normal flags values get represented in 24-bit values (see
+ * get_pte_flags() and put_pte_flags()), so set bit 24 in
+ * addition to be able to flag out of range frame numbers.
+ */
+#if GUEST_PAGING_LEVELS == 3
 #define _PAGE_INVALID_BITS \
-    get_pte_flags(((1ull<<52) - 1) & ~((1ull<<paddr_bits) - 1))
+    (_PAGE_INVALID_BIT | get_pte_flags(((1ull << 63) - 1) & ~(PAGE_SIZE - 1)))
+#else /* 2-level and 4-level */
+#define _PAGE_INVALID_BITS _PAGE_INVALID_BIT
 #endif
 
 
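The comment above leans on how Xen packs PTE flags: get_pte_flags()
folds a PTE's low 12 and high 12 bits into a 24-bit value, so bit 24
can never be produced from a real PTE and is free for out-of-band
signalling.  A sketch of that packing (mirroring the definition in
x86_64/page.h):

    /* flags[11: 0] <- pte[11: 0]  (protection/AVL bits)
     * flags[23:12] <- pte[63:52]  (high attribute bits)  */
    #define get_pte_flags(x) (((int)((x) >> 40) & ~0xfff) | ((int)(x) & 0xfff))

    /* Hence for 3-level guests the mask above sets bit 24 plus the
     * flags image of PTE bits 62:52, which are reserved under PAE. */
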
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index f29d370..37ac9e4 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -218,6 +218,8 @@ extern u64 trampoline_misc_enable_off;
 
 /* Maximum width of physical addresses supported by the hardware */
 extern unsigned int paddr_bits;
+/* Max physical address width supported within HAP guests */
+extern unsigned int hap_paddr_bits;
 
 extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id table[]);
 
diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
index 86abb94..589f225 100644
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -153,6 +153,12 @@ typedef l4_pgentry_t root_pgentry_t;
 #define _PAGE_GNTTAB (1U<<22)
 
 /*
+ * Bit 24 of a 24-bit flag mask!  This is not any bit of a real pte,
+ * and is only used for signalling in variables that contain flags.
+ */
+#define _PAGE_INVALID_BIT (1U<<24)
+
+/*
  * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
  * This is needed to distinguish between user and kernel PTEs since _PAGE_USER
  * is asserted for both.
--
generated by git-patchbot for /home/xen/git/xen.git#master
