[xen master] x86/tlb: fix assisted flush usage



commit 5b718d24e88ceb2c28010c647836929b85b22b5d
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Thu Jul 2 11:05:53 2020 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Jul 2 11:09:33 2020 +0200

    x86/tlb: fix assisted flush usage
    
    Commit e9aca9470ed86 introduced a regression when avoiding sending
    IPIs for certain flush operations. Xen's page fault handler
    (spurious_page_fault) relies on blocking interrupts in order to
    prevent handling TLB flush IPIs and thus prevent other CPUs from
    removing page table pages. Switching to assisted flushing avoids such
    IPIs, and can thus result in pages belonging to the page tables being
    removed (and possibly re-used) while __page_fault_type is being
    executed.
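    
    To make the broken assumption concrete, below is a minimal, standalone
    C sketch of the ordering the fault handler depends on. The names
    (send_flush_ipi_and_wait, assisted_flush, free_page_table_page) and the
    single-CPU toy model are illustrative only and are not Xen's actual
    API: with an IPI-based flush the freeing CPU cannot make progress until
    a CPU walking the tables re-enables interrupts, while an assisted flush
    completes without that implicit handshake.
    
    #include <stdbool.h>
    #include <stdio.h>
    
    /* Toy single-CPU model: a global stands in for the remote CPU's IRQ flag. */
    static bool remote_irqs_enabled = true;
    
    /* IPI-based flush: only "completes" once the remote CPU can take the IPI. */
    static bool send_flush_ipi_and_wait(void)
    {
        return remote_irqs_enabled;
    }
    
    /* Assisted flush: completes regardless of the remote CPU's IRQ state. */
    static bool assisted_flush(void)
    {
        return true;
    }
    
    static void free_page_table_page(bool use_assisted)
    {
        bool flushed = use_assisted ? assisted_flush() : send_flush_ipi_and_wait();
    
        printf("flush %s -> page %s be freed while the walk is in progress\n",
               flushed ? "completed" : "stalled",
               flushed ? "COULD" : "cannot");
    }
    
    int main(void)
    {
        /* Model spurious_page_fault(): walk the tables with interrupts off. */
        remote_irqs_enabled = false;
    
        free_page_table_page(false);  /* IPI flush: stalls, walk stays safe  */
        free_page_table_page(true);   /* assisted flush: the race of the bug */
    
        remote_irqs_enabled = true;   /* walk done, interrupts back on       */
        return 0;
    }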
    
    Force some of the TLB flushes to use IPIs, thus avoiding the assisted
    TLB flush. The selected flushes are the page type change (when
    switching from a page table type to a different one, ie: a page that
    has been removed as a page table) and page allocation. This sadly has
    a negative performance impact on the pvshim, as fewer assisted flushes
    can be used. Note the flush in grant-table code is also switched to
    use an IPI even when not strictly needed. This is done so that a
    common arch_flush_tlb_mask can be introduced and always used in common
    code.
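    
    For reference, a simplified, standalone sketch of the resulting shape
    of that per-arch hook (stub cpumask_t, placeholder flag values; only
    the x86/Arm split mirrors the diff below):
    
    #include <stdio.h>
    
    typedef struct { unsigned long bits; } cpumask_t;   /* stub, not Xen's */
    
    #ifdef CONFIG_X86
    
    # define FLUSH_TLB        0x1000   /* placeholder values */
    # define FLUSH_FORCE_IPI  0x8000
    
    static void flush_mask(const cpumask_t *mask, unsigned int flags)
    {
        (void)mask;
        printf("x86: flush via %s\n",
               (flags & FLUSH_FORCE_IPI) ? "IPI" : "assisted flush");
    }
    
    /* x86: always force the IPI so common code stays on the safe side. */
    # define arch_flush_tlb_mask(mask) flush_mask(mask, FLUSH_TLB | FLUSH_FORCE_IPI)
    
    #else
    
    /* Arm: TLB maintenance is broadcast by the hardware, no IPI is needed. */
    static void arch_flush_tlb_mask(const cpumask_t *mask)
    {
        (void)mask;
        puts("arm: flush_all_guests_tlb()");
    }
    
    #endif
    
    int main(void)
    {
        cpumask_t dirty = { 0 };
    
        arch_flush_tlb_mask(&dirty);   /* what gnttab_flush_tlb() now calls */
        return 0;
    }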
    
    Introduce a new flag (FLUSH_FORCE_IPI) and helper to force a TLB flush
    using an IPI (x86 only). Note that the flag is only meaningfully defined
    when the hypervisor supports PV or shadow paging mode, as otherwise
    hardware assisted paging domains are in charge of their page tables and
    won't share page tables with Xen, thus not influencing the result of
    page walks performed by the spurious fault handler.
    
    Just passing this new flag when calling flush_area_mask prevents the
    usage of the assisted flush without any other side effects.
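    
    In other words, the flag only influences the choice of flush mechanism.
    A hedged, standalone sketch of that dispatch decision follows; this is
    not the real flush_area_mask(), and the flag values and the
    assisted_flush_available toggle are placeholders:
    
    #include <stdbool.h>
    #include <stdio.h>
    
    #define FLUSH_TLB        0x1000   /* placeholder values, not Xen's */
    #define FLUSH_FORCE_IPI  0x8000
    
    /* e.g. true when running as a pvshim on top of an L0 hypervisor */
    static bool assisted_flush_available = true;
    
    static void flush_dispatch(unsigned int flags)
    {
        if ( assisted_flush_available && !(flags & FLUSH_FORCE_IPI) )
            puts("hypervisor-assisted flush (no IPI sent)");
        else
            puts("IPI-based flush");
    }
    
    int main(void)
    {
        flush_dispatch(FLUSH_TLB);                    /* may use the assisted path */
        flush_dispatch(FLUSH_TLB | FLUSH_FORCE_IPI);  /* always sends the IPI */
        return 0;
    }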
    
    Note the flag is not defined on Arm.
    
    Fixes: e9aca9470ed86 ('x86/tlb: use Xen L0 assisted TLB flush when available')
    Reported-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Julien Grall <jgrall@xxxxxxxxxx>
    Release-acked-by: Paul Durrant <paul@xxxxxxx>
---
 xen/arch/arm/smp.c             |  2 +-
 xen/arch/x86/mm.c              | 12 +++++++++++-
 xen/common/grant_table.c       |  2 +-
 xen/include/asm-arm/flushtlb.h |  2 +-
 xen/include/asm-x86/flushtlb.h | 17 +++++++++++++++++
 xen/include/xen/mm.h           |  2 +-
 6 files changed, 32 insertions(+), 5 deletions(-)

diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c
index ce1fcc8ef9..5823a69d3e 100644
--- a/xen/arch/arm/smp.c
+++ b/xen/arch/arm/smp.c
@@ -5,7 +5,7 @@
 #include <asm/gic.h>
 #include <asm/flushtlb.h>
 
-void flush_tlb_mask(const cpumask_t *mask)
+void arch_flush_tlb_mask(const cpumask_t *mask)
 {
     /* No need to IPI other processors on ARM, the processor takes care of it. */
     flush_all_guests_tlb();
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e376fc7e8f..82bc676553 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2894,7 +2894,17 @@ static int _get_page_type(struct page_info *page, unsigned long type,
                       ((nx & PGT_type_mask) == PGT_writable_page)) )
                 {
                     perfc_incr(need_flush_tlb_flush);
-                    flush_tlb_mask(mask);
+                    /*
+                     * If page was a page table make sure the flush is
+                     * performed using an IPI in order to avoid changing the
+                     * type of a page table page under the feet of
+                     * spurious_page_fault().
+                     */
+                    flush_mask(mask,
+                               (x & PGT_type_mask) &&
+                               (x & PGT_type_mask) <= PGT_root_page_table
+                               ? FLUSH_TLB | FLUSH_FORCE_IPI
+                               : FLUSH_TLB);
                 }
 
                 /* We lose existing type and validity. */
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index ece670e484..9f0cae52c0 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -393,7 +393,7 @@ static inline void grant_write_unlock(struct grant_table *gt)
 static inline void gnttab_flush_tlb(const struct domain *d)
 {
     if ( !paging_mode_external(d) )
-        flush_tlb_mask(d->dirty_cpumask);
+        arch_flush_tlb_mask(d->dirty_cpumask);
 }
 
 static inline unsigned int
diff --git a/xen/include/asm-arm/flushtlb.h b/xen/include/asm-arm/flushtlb.h
index ab1aae5c90..125a141975 100644
--- a/xen/include/asm-arm/flushtlb.h
+++ b/xen/include/asm-arm/flushtlb.h
@@ -26,7 +26,7 @@ static inline void page_set_tlbflush_timestamp(struct page_info *page)
 #endif
 
 /* Flush specified CPUs' TLBs */
-void flush_tlb_mask(const cpumask_t *mask);
+void arch_flush_tlb_mask(const cpumask_t *mask);
 
 /*
  * Flush a range of VA's hypervisor mappings from the TLB of the local
diff --git a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h
index 8639427cce..0be2273387 100644
--- a/xen/include/asm-x86/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -126,6 +126,16 @@ void switch_cr3_cr4(unsigned long cr3, unsigned long cr4);
 #else
 #define FLUSH_HVM_ASID_CORE 0
 #endif
+#if defined(CONFIG_PV) || defined(CONFIG_SHADOW_PAGING)
+/*
+ * Force an IPI to be sent. Note that adding this to the flags passed to
+ * flush_area_mask will prevent using the assisted flush without having any
+ * other side effect.
+ */
+# define FLUSH_FORCE_IPI 0x8000
+#else
+# define FLUSH_FORCE_IPI 0
+#endif
 
 /* Flush local TLBs/caches. */
 unsigned int flush_area_local(const void *va, unsigned int flags);
@@ -151,6 +161,13 @@ void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags);
 #define flush_tlb_one_mask(mask,v)              \
     flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))
 
+/*
+ * Make the common code TLB flush helper force use of an IPI in order to be
+ * on the safe side. Note that not all calls from common code strictly require
+ * this.
+ */
+#define arch_flush_tlb_mask(mask) flush_mask(mask, FLUSH_TLB | FLUSH_FORCE_IPI)
+
 /* Flush all CPUs' TLBs */
 #define flush_tlb_all()                         \
     flush_tlb_mask(&cpu_online_map)
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 9b62087be1..1061765bcd 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -648,7 +648,7 @@ static inline void filtered_flush_tlb_mask(uint32_t tlbflush_timestamp)
     if ( !cpumask_empty(&mask) )
     {
         perfc_incr(need_flush_tlb_flush);
-        flush_tlb_mask(&mask);
+        arch_flush_tlb_mask(&mask);
     }
 }
 
--
generated by git-patchbot for /home/xen/git/xen.git#master



 

