
[Xen-devel] [PATCH 2/2] x86/tlb: use Xen L0 assisted TLB flush when available



Use the Xen L0 HVMOP_flush_tlbs hypercall, when available, in order to
perform TLB flushes. This greatly improves the performance of TLB
flushes when running with a large number of vCPUs as a Xen guest, and
is especially important when running in shim mode.
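
In outline, the assisted flush amounts to the following call pattern
(a sketch; xen_hypercall_hvm_op() is the hypercall wrapper already
used by the guest support code, and HVMOP_flush_tlbs takes no
argument, so @arg must be NULL):

    int rc;

    do {
        /* Ask L0 to flush the TLBs of all of this guest's vCPUs. */
        rc = xen_hypercall_hvm_op(HVMOP_flush_tlbs, NULL);
    } while ( rc == -ERESTART ); /* L0 asked for the call to be reissued */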

The following figures are from a PV guest running `make -j342 xen` in
shim mode with 32 vCPUs.

Using x2APIC and ALLBUT shorthand:
real    4m35.973s
user    4m35.110s
sys     36m24.117s

Using L0 assisted flush:
real    1m17.391s
user    4m42.413s
sys     6m20.773s
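
That is roughly a 3.6x reduction in wall clock time (276s to 77s) and
a 5.7x reduction in system time (2184s to 381s).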

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/guest/xen/xen.c    | 11 +++++++++++
 xen/arch/x86/smp.c              |  6 ++++++
 xen/include/asm-x86/guest/xen.h |  7 +++++++
 3 files changed, 24 insertions(+)

diff --git a/xen/arch/x86/guest/xen/xen.c b/xen/arch/x86/guest/xen/xen.c
index 6dbc5f953f..e6493caecf 100644
--- a/xen/arch/x86/guest/xen/xen.c
+++ b/xen/arch/x86/guest/xen/xen.c
@@ -281,6 +281,17 @@ int xg_free_unused_page(mfn_t mfn)
     return rangeset_remove_range(mem, mfn_x(mfn), mfn_x(mfn));
 }
 
+int xg_flush_tlbs(void)
+{
+    int rc;
+
+    do {
+        rc = xen_hypercall_hvm_op(HVMOP_flush_tlbs, NULL);
+    } while ( rc == -ERESTART );
+
+    return rc;
+}
+
 static void ap_resume(void *unused)
 {
     map_vcpuinfo();
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index 427c33db9d..a892db28c1 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -15,6 +15,7 @@
 #include <xen/perfc.h>
 #include <xen/spinlock.h>
 #include <asm/current.h>
+#include <asm/guest.h>
 #include <asm/smp.h>
 #include <asm/mc146818rtc.h>
 #include <asm/flushtlb.h>
@@ -235,6 +236,11 @@ void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
     {
         bool cpus_locked = false;
 
+        if ( xen_guest &&
+             !(flags & ~(FLUSH_TLB | FLUSH_TLB_GLOBAL | FLUSH_VA_VALID)) &&
+             !xg_flush_tlbs() )
+            return;
+
         spin_lock(&flush_lock);
         cpumask_and(&flush_cpumask, mask, &cpu_online_map);
         cpumask_clear_cpu(cpu, &flush_cpumask);
diff --git a/xen/include/asm-x86/guest/xen.h b/xen/include/asm-x86/guest/xen.h
index 2042a9a0c2..f0de9e4d71 100644
--- a/xen/include/asm-x86/guest/xen.h
+++ b/xen/include/asm-x86/guest/xen.h
@@ -36,6 +36,7 @@ extern uint32_t xen_cpuid_base;
 const struct hypervisor_ops *xg_probe(void);
 int xg_alloc_unused_page(mfn_t *mfn);
 int xg_free_unused_page(mfn_t mfn);
+int xg_flush_tlbs(void);
 
 DECLARE_PER_CPU(unsigned int, vcpu_id);
 DECLARE_PER_CPU(struct vcpu_info *, vcpu_info);
@@ -47,6 +48,12 @@ DECLARE_PER_CPU(struct vcpu_info *, vcpu_info);
 
 static inline const struct hypervisor_ops *xg_probe(void) { return NULL; }
 
+static inline int xg_flush_tlbs(void)
+{
+    ASSERT_UNREACHABLE();
+    return -ENOSYS;
+}
+
 #endif /* CONFIG_XEN_GUEST */
 #endif /* __X86_GUEST_XEN_H__ */
 
-- 
2.24.1

