[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[xen staging] x86/shadow: encode full GFN in magic MMIO entries



commit e113ed7134118757980a1d12837fa0f118f81a05
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Fri Apr 9 09:20:15 2021 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Apr 9 09:20:15 2021 +0200

    x86/shadow: encode full GFN in magic MMIO entries
    
    Since we don't need to encode all of the PTE flags, we have enough bits
    in the shadow entry to store the full GFN. Limit use of literal numbers
    a little and instead derive some of the involved values. Sanity-check
    the result via BUILD_BUG_ON()s.
    
    This then allows dropping from sh_l1e_mmio() again the guarding against
    too large GFNs. It needs replacing by an L1TF safety check though, which
    in turn requires exposing cpu_has_bug_l1tf.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/mm/shadow/types.h  | 36 ++++++++++++++++++++++++++----------
 xen/arch/x86/spec_ctrl.c        |  2 +-
 xen/include/asm-x86/spec_ctrl.h |  1 +
 3 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/mm/shadow/types.h b/xen/arch/x86/mm/shadow/types.h
index 27de593d87..a86be62d88 100644
--- a/xen/arch/x86/mm/shadow/types.h
+++ b/xen/arch/x86/mm/shadow/types.h
@@ -283,9 +283,18 @@ static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
  * This is only feasible for PAE and 64bit Xen: 32-bit non-PAE PTEs don't
  * have reserved bits that we can use for this.  And even there it can only
  * be used if we can be certain the processor doesn't use all 52 address bits.
+ *
+ * For the MMIO encoding (see below) we need the bottom 4 bits for
+ * identifying the kind of entry and a full GFN's worth of bits to encode
+ * the originating frame number.  Set all remaining bits to trigger
+ * reserved bit faults, if (see above) the hardware permits triggering such.
  */
 
-#define SH_L1E_MAGIC 0xffffffff00000001ULL
+#ifdef CONFIG_BIGMEM
+# define SH_L1E_MAGIC_MASK (0xfffff00000000000UL | _PAGE_PRESENT)
+#else
+# define SH_L1E_MAGIC_MASK (0xfffffff000000000UL | _PAGE_PRESENT)
+#endif
 
 static inline bool sh_have_pte_rsvd_bits(void)
 {
@@ -294,7 +303,8 @@ static inline bool sh_have_pte_rsvd_bits(void)
 
 static inline bool sh_l1e_is_magic(shadow_l1e_t sl1e)
 {
-    return (sl1e.l1 & SH_L1E_MAGIC) == SH_L1E_MAGIC;
+    BUILD_BUG_ON(!(PADDR_MASK & PAGE_MASK & SH_L1E_MAGIC_MASK));
+    return (sl1e.l1 & SH_L1E_MAGIC_MASK) == SH_L1E_MAGIC_MASK;
 }
 
 /* Guest not present: a single magic value */
@@ -320,24 +330,30 @@ static inline bool sh_l1e_is_gnp(shadow_l1e_t sl1e)
 
 /*
  * MMIO: an invalid PTE that contains the GFN of the equivalent guest l1e.
- * We store 28 bits of GFN in bits 4:32 of the entry.
+ * We store the GFN in bits 4:35 (BIGMEM: 4:43) of the entry.
  * The present bit is set, and the U/S and R/W bits are taken from the guest.
  * Bit 3 is always 0, to differentiate from gnp above.
  */
-#define SH_L1E_MMIO_MAGIC       0xffffffff00000001ULL
-#define SH_L1E_MMIO_MAGIC_MASK  0xffffffff00000009ULL
-#define SH_L1E_MMIO_GFN_MASK    0x00000000fffffff0ULL
+#define SH_L1E_MMIO_MAGIC       SH_L1E_MAGIC_MASK
+#define SH_L1E_MMIO_MAGIC_BIT   8
+#define SH_L1E_MMIO_MAGIC_MASK  (SH_L1E_MMIO_MAGIC | SH_L1E_MMIO_MAGIC_BIT)
+#define SH_L1E_MMIO_GFN_MASK    ~(SH_L1E_MMIO_MAGIC_MASK | _PAGE_RW | _PAGE_USER)
 
 static inline shadow_l1e_t sh_l1e_mmio(gfn_t gfn, u32 gflags)
 {
     unsigned long gfn_val = MASK_INSR(gfn_x(gfn), SH_L1E_MMIO_GFN_MASK);
+    shadow_l1e_t sl1e = { (SH_L1E_MMIO_MAGIC | gfn_val |
+                           (gflags & (_PAGE_USER | _PAGE_RW))) };
+
+    BUILD_BUG_ON(SH_L1E_MMIO_MAGIC_BIT <= _PAGE_RW);
+    BUILD_BUG_ON(SH_L1E_MMIO_MAGIC_BIT <= _PAGE_USER);
 
     if ( !sh_have_pte_rsvd_bits() ||
-         gfn_x(gfn) != MASK_EXTR(gfn_val, SH_L1E_MMIO_GFN_MASK) )
-        return shadow_l1e_empty();
+         (cpu_has_bug_l1tf &&
+          !is_l1tf_safe_maddr(shadow_l1e_get_paddr(sl1e))) )
+        sl1e = shadow_l1e_empty();
 
-    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC | gfn_val |
-                             (gflags & (_PAGE_USER | _PAGE_RW))) };
+    return sl1e;
 }
 
 static inline bool sh_l1e_is_mmio(shadow_l1e_t sl1e)
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index c9f78ead62..cd05f42394 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -59,7 +59,7 @@ uint8_t __read_mostly default_xen_spec_ctrl;
 uint8_t __read_mostly default_spec_ctrl_flags;
 
 paddr_t __read_mostly l1tf_addr_mask, __read_mostly l1tf_safe_maddr;
-static bool __initdata cpu_has_bug_l1tf;
+bool __read_mostly cpu_has_bug_l1tf;
 static unsigned int __initdata l1d_maxphysaddr;
 
 static bool __initdata cpu_has_bug_msbds_only; /* => minimal HT impact. */
diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
index b252bb8631..e671b6fd8d 100644
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -45,6 +45,7 @@ extern uint8_t default_spec_ctrl_flags;
 
 extern int8_t opt_xpti_hwdom, opt_xpti_domu;
 
+extern bool cpu_has_bug_l1tf;
 extern int8_t opt_pv_l1tf_hwdom, opt_pv_l1tf_domu;
 
 /*
--
generated by git-patchbot for /home/xen/git/xen.git#staging



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.