
[Xen-devel] [PATCH v3 5/7] x86/tlb: introduce a flush guests TLB flag



Introduce a specific flag to request an HVM guest TLB flush, which is
an ASID/VPID tickle that forces a linear TLB flush for all HVM guests.

This was previously done unconditionally in each pre_flush call, but
that's not always required: HVM guests not using shadow paging don't
require linear TLB flushes, as Xen doesn't modify the guest page tables
in that case (i.e. when using HAP).

Modify all shadow code TLB flushes to also flush the guest TLB, in
order to keep the previous behavior. I haven't looked at each specific
shadow code TLB flush to figure out whether it actually requires a
guest TLB flush or not, so there might be room for improvement in that
regard.
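
As an illustration, the mechanical change applied at every shadow code
flush site is the following before/after sketch (flush_mask() and the
flag values are taken from the diff below):

    /* Before: the guest (ASID/VPID) tickle was implicit in pre_flush(). */
    flush_tlb_mask(d->dirty_cpumask);

    /* After: the guest TLB flush must be requested explicitly. */
    flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);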

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/flushtlb.c         |  5 +++--
 xen/arch/x86/mm/shadow/common.c | 18 +++++++++---------
 xen/arch/x86/mm/shadow/hvm.c    |  2 +-
 xen/arch/x86/mm/shadow/multi.c  | 16 ++++++++--------
 xen/include/asm-x86/flushtlb.h  |  2 ++
 5 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/flushtlb.c b/xen/arch/x86/flushtlb.c
index 03f92c23dc..e7ccd4ec7b 100644
--- a/xen/arch/x86/flushtlb.c
+++ b/xen/arch/x86/flushtlb.c
@@ -59,8 +59,6 @@ static u32 pre_flush(void)
         raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);
 
  skip_clocktick:
-    hvm_flush_guest_tlbs();
-
     return t2;
 }
 
@@ -221,6 +219,9 @@ unsigned int flush_area_local(const void *va, unsigned int flags)
             do_tlb_flush();
     }
 
+    if ( flags & FLUSH_GUESTS_TLB )
+        hvm_flush_guest_tlbs();
+
     if ( flags & FLUSH_CACHE )
     {
         const struct cpuinfo_x86 *c = &current_cpu_data;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index ee90e55b41..2a85e10112 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -363,7 +363,7 @@ static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn,
     }
 
     if ( ftlb )
-        flush_tlb_mask(d->dirty_cpumask);
+        flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
 
     return 0;
 }
@@ -939,7 +939,7 @@ static void _shadow_prealloc(struct domain *d, unsigned int pages)
                 /* See if that freed up enough space */
                 if ( d->arch.paging.shadow.free_pages >= pages )
                 {
-                    flush_tlb_mask(d->dirty_cpumask);
+                    flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
                     return;
                 }
             }
@@ -993,7 +993,7 @@ static void shadow_blow_tables(struct domain *d)
                                pagetable_get_mfn(v->arch.shadow_table[i]), 0);
 
     /* Make sure everyone sees the unshadowings */
-    flush_tlb_mask(d->dirty_cpumask);
+    flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
 }
 
 void shadow_blow_tables_per_domain(struct domain *d)
@@ -1102,7 +1102,7 @@ mfn_t shadow_alloc(struct domain *d,
         if ( unlikely(!cpumask_empty(&mask)) )
         {
             perfc_incr(shadow_alloc_tlbflush);
-            flush_tlb_mask(&mask);
+            flush_mask(&mask, FLUSH_TLB | FLUSH_GUESTS_TLB);
         }
         /* Now safe to clear the page for reuse */
         clear_domain_page(page_to_mfn(sp));
@@ -2290,7 +2290,7 @@ void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all)
 
     /* Need to flush TLBs now, so that linear maps are safe next time we
      * take a fault. */
-    flush_tlb_mask(d->dirty_cpumask);
+    flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
 
     paging_unlock(d);
 }
@@ -3005,7 +3005,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
         {
             sh_remove_all_shadows_and_parents(d, mfn);
             if ( sh_remove_all_mappings(d, mfn, _gfn(gfn)) )
-                flush_tlb_mask(d->dirty_cpumask);
+                flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
         }
     }
 
@@ -3045,7 +3045,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
                 }
                 omfn = mfn_add(omfn, 1);
             }
-            flush_tlb_mask(&flushmask);
+            flush_mask(&flushmask, FLUSH_TLB | FLUSH_GUESTS_TLB);
 
             if ( npte )
                 unmap_domain_page(npte);
@@ -3332,7 +3332,7 @@ int shadow_track_dirty_vram(struct domain *d,
         }
     }
     if ( flush_tlb )
-        flush_tlb_mask(d->dirty_cpumask);
+        flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
     goto out;
 
 out_sl1ma:
@@ -3402,7 +3402,7 @@ bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
     }
 
     /* Flush TLBs on all CPUs with dirty vcpu state. */
-    flush_tlb_mask(mask);
+    flush_mask(mask, FLUSH_TLB | FLUSH_GUESTS_TLB);
 
     /* Done. */
     for_each_vcpu ( d, v )
diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c
index a219266fa2..64077d181b 100644
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -590,7 +590,7 @@ static void validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
 
     if ( rc & SHADOW_SET_FLUSH )
         /* Need to flush TLBs to pick up shadow PT changes */
-        flush_tlb_mask(d->dirty_cpumask);
+        flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
 
     if ( rc & SHADOW_SET_ERROR )
     {
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index dfe264cf83..7efe32c35b 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3066,7 +3066,7 @@ static int sh_page_fault(struct vcpu *v,
         perfc_incr(shadow_rm_write_flush_tlb);
         smp_wmb();
         atomic_inc(&d->arch.paging.shadow.gtable_dirty_version);
-        flush_tlb_mask(d->dirty_cpumask);
+        flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
     }
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -3575,7 +3575,7 @@ static bool sh_invlpg(struct vcpu *v, unsigned long linear)
     if ( mfn_to_page(sl1mfn)->u.sh.type
          == SH_type_fl1_shadow )
     {
-        flush_tlb_local();
+        flush_local(FLUSH_TLB | FLUSH_GUESTS_TLB);
         return false;
     }
 
@@ -3810,7 +3810,7 @@ sh_update_linear_entries(struct vcpu *v)
          * table entry. But, without this change, it would fetch the wrong
          * value due to a stale TLB.
          */
-        flush_tlb_local();
+        flush_local(FLUSH_TLB | FLUSH_GUESTS_TLB);
     }
 }
 
@@ -4011,7 +4011,7 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
      * (old) shadow linear maps in the writeable mapping heuristics. */
 #if GUEST_PAGING_LEVELS == 2
     if ( sh_remove_write_access(d, gmfn, 2, 0) != 0 )
-        flush_tlb_mask(d->dirty_cpumask);
+        flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
     sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow);
 #elif GUEST_PAGING_LEVELS == 3
     /* PAE guests have four shadow_table entries, based on the
@@ -4035,7 +4035,7 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
             }
         }
         if ( flush )
-            flush_tlb_mask(d->dirty_cpumask);
+            flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
         /* Now install the new shadows. */
         for ( i = 0; i < 4; i++ )
         {
@@ -4056,7 +4056,7 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
     }
 #elif GUEST_PAGING_LEVELS == 4
     if ( sh_remove_write_access(d, gmfn, 4, 0) != 0 )
-        flush_tlb_mask(d->dirty_cpumask);
+        flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
     sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
     if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) )
     {
@@ -4501,7 +4501,7 @@ static void sh_pagetable_dying(paddr_t gpa)
         }
     }
     if ( flush )
-        flush_tlb_mask(d->dirty_cpumask);
+        flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
 
     /* Remember that we've seen the guest use this interface, so we
      * can rely on it using it in future, instead of guessing at
@@ -4538,7 +4538,7 @@ static void sh_pagetable_dying(paddr_t gpa)
         mfn_to_page(gmfn)->pagetable_dying = true;
         shadow_unhook_mappings(d, smfn, 1/* user pages only */);
         /* Now flush the TLB: we removed toplevel mappings. */
-        flush_tlb_mask(d->dirty_cpumask);
+        flush_mask(d->dirty_cpumask, FLUSH_TLB | FLUSH_GUESTS_TLB);
     }
 
     /* Remember that we've seen the guest use this interface, so we
diff --git a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h
index 2cfe4e6e97..07f9bc6103 100644
--- a/xen/include/asm-x86/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -105,6 +105,8 @@ void switch_cr3_cr4(unsigned long cr3, unsigned long cr4);
 #define FLUSH_VCPU_STATE 0x1000
  /* Flush the per-cpu root page table */
 #define FLUSH_ROOT_PGTBL 0x2000
+ /* Flush all HVM guests linear TLB (using ASID/VPID) */
+#define FLUSH_GUESTS_TLB 0x4000
 
 /* Flush local TLBs/caches. */
 unsigned int flush_area_local(const void *va, unsigned int flags);
-- 
2.25.0

