
[Xen-devel] [PATCH v3 4/7] xen/x86: use invpcid for flushing the TLB



If possible, use the INVPCID instruction for flushing the TLB instead of
toggling cr4.pge for that purpose.

While at it, remove the requirement that cr4.pge be set during MTRR
loading, as dropping that requirement will be needed later anyway.

Add a command line option "noinvpcid" to disable the use of INVPCID.
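
For example, on an affected host the new option would simply be appended
to the hypervisor command line, e.g. in a grub.cfg entry (illustrative
only, other options omitted):

    multiboot2 /boot/xen.gz noinvpcid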

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V3:
- new patch
---
 docs/misc/xen-command-line.markdown |  8 ++++++++
 xen/arch/x86/cpu/mtrr/generic.c     | 37 ++++++++++++++++++++++++++-----------
 xen/arch/x86/flushtlb.c             | 31 +++++++++++++++++++++----------
 xen/arch/x86/setup.c                |  7 +++++++
 4 files changed, 62 insertions(+), 21 deletions(-)
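
As a reference for reviewers: invpcid_flush_all() used below was
introduced by an earlier patch of this series. A minimal sketch of its
shape, assuming the INVPCID descriptor layout from the SDM (names and
details may differ from the real asm/invpcid.h):

    /* Sketch only, not part of this patch. INVPCID type 2 invalidates
       all TLB entries, global and non-global, for every PCID. */
    #define INVPCID_TYPE_ALL_INCL_GLOBAL 2

    static inline void invpcid_flush_all(void)
    {
        /* 128-bit memory descriptor: the PCID (low 12 bits) and linear
           address are ignored for type 2, but reserved bits must be 0. */
        struct { uint64_t pcid_and_rsvd, addr; } desc = { 0, 0 };

        asm volatile ( "invpcid %0, %1"
                       : : "m" (desc),
                           "r" ((unsigned long)INVPCID_TYPE_ALL_INCL_GLOBAL)
                       : "memory" );
    }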

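The fallback paths below rely on two architectural facts: toggling
CR4.PGE invalidates the complete TLB including global entries, while a
CR3 reload invalidates only non-global entries, which is enough whenever
CR4.PGE is already clear (no entry can be marked global then).
Summarised in one illustrative helper (again not part of the patch):

    static void flush_all_sketch(void)
    {
        unsigned long cr4 = read_cr4();

        if ( cpu_has_invpcid )
            invpcid_flush_all();            /* globals + non-globals */
        else if ( cr4 & X86_CR4_PGE )
        {
            write_cr4(cr4 & ~X86_CR4_PGE);  /* clearing PGE flushes all */
            write_cr4(cr4);                 /* restore PGE */
        }
        else
            asm volatile ( "mov %0, %%cr3"  /* non-global entries only */
                           : : "r" (read_cr3()) : "memory" );
    }
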
diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index 79be9a6ba5..8fc7b2ff3b 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1380,6 +1380,14 @@ Because responsibility for APIC setup is shared between Xen and the
 domain 0 kernel this option is automatically propagated to the domain
 0 command line.
 
+### noinvpcid (x86)
+> `= <boolean>`
+
+Disable using the INVPCID instruction for flushing TLB entries.
+This should only be used in case of known issues with that instruction on
+the current platform. Disabling INVPCID will normally result in slightly
+degraded performance.
+
 ### noirqbalance
 > `= <boolean>`
 
diff --git a/xen/arch/x86/cpu/mtrr/generic.c b/xen/arch/x86/cpu/mtrr/generic.c
index e9c0e5e059..e88643f4bf 100644
--- a/xen/arch/x86/cpu/mtrr/generic.c
+++ b/xen/arch/x86/cpu/mtrr/generic.c
@@ -5,6 +5,7 @@
 #include <xen/mm.h>
 #include <xen/stdbool.h>
 #include <asm/flushtlb.h>
+#include <asm/invpcid.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
@@ -400,8 +401,10 @@ static DEFINE_SPINLOCK(set_atomicity_lock);
  * has been called.
  */
 
-static void prepare_set(void)
+static bool prepare_set(void)
 {
+       unsigned long cr4;
+
        /*  Note that this is not ideal, since the cache is only flushed/disabled
           for this CPU while the MTRRs are changed, but changing this requires
           more invasive changes to the way the kernel boots  */
@@ -412,18 +415,24 @@ static void prepare_set(void)
        write_cr0(read_cr0() | X86_CR0_CD);
        wbinvd();
 
-       /*  TLB flushing here relies on Xen always using CR4.PGE. */
-       BUILD_BUG_ON(!(XEN_MINIMAL_CR4 & X86_CR4_PGE));
-       write_cr4(read_cr4() & ~X86_CR4_PGE);
+       cr4 = read_cr4();
+       if (cr4 & X86_CR4_PGE)
+               write_cr4(cr4 & ~X86_CR4_PGE);
+       else if (cpu_has_invpcid)
+               invpcid_flush_all();
+       else
+               asm volatile( "mov %0, %%cr3" : : "r" (read_cr3()) : "memory" );
 
        /*  Save MTRR state */
        rdmsrl(MSR_MTRRdefType, deftype);
 
        /*  Disable MTRRs, and set the default type to uncached  */
        mtrr_wrmsr(MSR_MTRRdefType, deftype & ~0xcff);
+
+       return cr4 & X86_CR4_PGE;
 }
 
-static void post_set(void)
+static void post_set(bool pge)
 {
        /* Intel (P6) standard MTRRs */
        mtrr_wrmsr(MSR_MTRRdefType, deftype);
@@ -432,7 +441,12 @@ static void post_set(void)
        write_cr0(read_cr0() & ~X86_CR0_CD);
 
        /*  Reenable CR4.PGE (also flushes the TLB) */
-       write_cr4(read_cr4() | X86_CR4_PGE);
+       if (pge)
+               write_cr4(read_cr4() | X86_CR4_PGE);
+       else if (cpu_has_invpcid)
+               invpcid_flush_all();
+       else
+               asm volatile( "mov %0, %%cr3" : : "r" (read_cr3()) : "memory" );
 
        spin_unlock(&set_atomicity_lock);
 }
@@ -441,14 +455,15 @@ static void generic_set_all(void)
 {
        unsigned long mask, count;
        unsigned long flags;
+       bool pge;
 
        local_irq_save(flags);
-       prepare_set();
+       pge = prepare_set();
 
        /* Actually set the state */
        mask = set_mtrr_state();
 
-       post_set();
+       post_set(pge);
        local_irq_restore(flags);
 
        /*  Use the atomic bitops to update the global mask  */
@@ -457,7 +472,6 @@ static void generic_set_all(void)
                        set_bit(count, &smp_changes_mask);
                mask >>= 1;
        }
-       
 }
 
 static void generic_set_mtrr(unsigned int reg, unsigned long base,
@@ -474,11 +488,12 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
 {
        unsigned long flags;
        struct mtrr_var_range *vr;
+       bool pge;
 
        vr = &mtrr_state.var_ranges[reg];
 
        local_irq_save(flags);
-       prepare_set();
+       pge = prepare_set();
 
        if (size == 0) {
                /* The invalid bit is kept in the mask, so we simply clear the
@@ -499,7 +514,7 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
                mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), vr->mask);
        }
 
-       post_set();
+       post_set(pge);
        local_irq_restore(flags);
 }
 
diff --git a/xen/arch/x86/flushtlb.c b/xen/arch/x86/flushtlb.c
index 9a9af71d5a..6345676bc5 100644
--- a/xen/arch/x86/flushtlb.c
+++ b/xen/arch/x86/flushtlb.c
@@ -10,6 +10,7 @@
 #include <xen/sched.h>
 #include <xen/softirq.h>
 #include <asm/flushtlb.h>
+#include <asm/invpcid.h>
 #include <asm/page.h>
 
 /* Debug builds: Wrap frequently to stress-test the wrap logic. */
@@ -71,6 +72,25 @@ static void post_flush(u32 t)
     this_cpu(tlbflush_time) = t;
 }
 
+static void do_tlb_flush(void)
+{
+    u32 t;
+
+    t = pre_flush();
+
+    if ( cpu_has_invpcid )
+        invpcid_flush_all();
+    else
+    {
+        unsigned long cr4 = read_cr4();
+
+        write_cr4(cr4 ^ X86_CR4_PGE);
+        write_cr4(cr4);
+    }
+
+    post_flush(t);
+}
+
 void write_cr3(unsigned long cr3)
 {
     unsigned long flags, cr4;
@@ -118,16 +138,7 @@ unsigned int flush_area_local(const void *va, unsigned int flags)
                            : : "m" (*(const char *)(va)) : "memory" );
         }
         else
-        {
-            u32 t = pre_flush();
-            unsigned long cr4 = read_cr4();
-
-            write_cr4(cr4 & ~X86_CR4_PGE);
-            barrier();
-            write_cr4(cr4);
-
-            post_flush(t);
-        }
+            do_tlb_flush();
     }
 
     if ( flags & FLUSH_CACHE )
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 801334b43f..5318aaa859 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -63,6 +63,10 @@ boolean_param("nosmp", opt_nosmp);
 static unsigned int __initdata max_cpus;
 integer_param("maxcpus", max_cpus);
 
+/* opt_noinvpcid: If true, don't use INVPCID instruction even if available. */
+static bool __initdata opt_noinvpcid;
+boolean_param("noinvpcid", opt_noinvpcid);
+
 unsigned long __read_mostly cr4_pv32_mask;
 
 /* **** Linux config option: propagated to domain0. */
@@ -1549,6 +1553,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
     if ( cpu_has_fsgsbase )
         set_in_cr4(X86_CR4_FSGSBASE);
 
+    if ( opt_noinvpcid )
+        setup_clear_cpu_cap(X86_FEATURE_INVPCID);
+
     init_speculation_mitigations();
 
     init_idle_domain();
-- 
2.13.6

