
[Xen-changelog] [xen master] x86/ldt: Alter how invalidate_shadow_ldt() deals with TLB flushes



commit bfa83bbc3385bab0bfee3c7f408c164c5584e688
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon Oct 2 14:13:38 2017 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Fri Dec 1 19:03:26 2017 +0000

    x86/ldt: Alter how invalidate_shadow_ldt() deals with TLB flushes
    
    Modify invalidate_shadow_ldt() to return a boolean indicating whether
    mappings have been dropped, rather than taking a flush parameter.  Tweak
    the internal logic to be able to ASSERT() that
    v->arch.pv_vcpu.shadow_ldt_mapcnt matches the number of PTEs removed.
    
    This allows MMUEXTOP_SET_LDT to avoid a local TLB flush if no LDT entries
    had been faulted in to begin with.
    
    Finally, correct a comment in __get_page_type().  Under no circumstance is
    it safe to forgo the TLB shootdown for GDT/LDT pages, as that would allow
    one vcpu to gain a writeable mapping to a frame still mapped as a GDT/LDT
    by another vcpu.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/mm.c | 39 +++++++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 6ec6e68..155e425 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -544,10 +544,14 @@ static inline void set_tlbflush_timestamp(struct page_info *page)
 const char __section(".bss.page_aligned.const") __aligned(PAGE_SIZE)
     zero_page[PAGE_SIZE];
 
-static void invalidate_shadow_ldt(struct vcpu *v, int flush)
+/*
+ * Flush the LDT, dropping any typerefs.  Returns a boolean indicating whether
+ * mappings have been removed (i.e. a TLB flush is needed).
+ */
+static bool invalidate_shadow_ldt(struct vcpu *v)
 {
     l1_pgentry_t *pl1e;
-    unsigned int i;
+    unsigned int i, mappings_dropped = 0;
     struct page_info *page;
 
     BUG_ON(unlikely(in_irq()));
@@ -557,26 +561,29 @@ static void invalidate_shadow_ldt(struct vcpu *v, int flush)
     if ( v->arch.pv_vcpu.shadow_ldt_mapcnt == 0 )
         goto out;
 
-    v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
     pl1e = pv_ldt_ptes(v);
 
     for ( i = 0; i < 16; i++ )
     {
         if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) )
             continue;
+
         page = l1e_get_page(pl1e[i]);
         l1e_write(&pl1e[i], l1e_empty());
+        mappings_dropped++;
+
         ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page);
         ASSERT_PAGE_IS_DOMAIN(page, v->domain);
         put_page_and_type(page);
     }
 
-    /* Rid TLBs of stale mappings (guest mappings and shadow mappings). */
-    if ( flush )
-        flush_tlb_mask(v->vcpu_dirty_cpumask);
+    ASSERT(v->arch.pv_vcpu.shadow_ldt_mapcnt == mappings_dropped);
+    v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
 
  out:
     spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
+
+    return mappings_dropped;
 }
 
 
@@ -1239,7 +1246,10 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
              (l1e_owner == pg_owner) )
         {
             for_each_vcpu ( pg_owner, v )
-                invalidate_shadow_ldt(v, 1);
+            {
+                if ( invalidate_shadow_ldt(v) )
+                    flush_tlb_mask(v->vcpu_dirty_cpumask);
+            }
         }
         put_page(page);
     }
@@ -2561,9 +2571,9 @@ static int __get_page_type(struct page_info *page, unsigned long type,
             if ( (x & PGT_type_mask) != type )
             {
                 /*
-                 * On type change we check to flush stale TLB entries. This
-                 * may be unnecessary (e.g., page was GDT/LDT) but those
-                 * circumstances should be very rare.
+                 * On type change we check to flush stale TLB entries. It is
+                 * vital that no other CPUs are left with mappings of a frame
+                 * which is about to become writeable to the guest.
                  */
                 cpumask_t *mask = this_cpu(scratch_cpumask);
 
@@ -2814,7 +2824,7 @@ int new_guest_cr3(mfn_t mfn)
             return rc;
         }
 
-        invalidate_shadow_ldt(curr, 0);
+        invalidate_shadow_ldt(curr); /* Unconditional TLB flush later. */
         write_ptbase(curr);
 
         return 0;
@@ -2852,7 +2862,7 @@ int new_guest_cr3(mfn_t mfn)
         return rc;
     }
 
-    invalidate_shadow_ldt(curr, 0);
+    invalidate_shadow_ldt(curr); /* Unconditional TLB flush later. */
 
     if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
         fill_ro_mpt(mfn);
@@ -3359,8 +3369,9 @@ long do_mmuext_op(
             else if ( (curr->arch.pv_vcpu.ldt_ents != ents) ||
                       (curr->arch.pv_vcpu.ldt_base != ptr) )
             {
-                invalidate_shadow_ldt(curr, 0);
-                flush_tlb_local();
+                if ( invalidate_shadow_ldt(curr) )
+                    flush_tlb_local();
+
                 curr->arch.pv_vcpu.ldt_base = ptr;
                 curr->arch.pv_vcpu.ldt_ents = ents;
                 load_LDT(curr);
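
[Editor's note] The pattern in this patch can be shown in isolation: the
teardown routine counts what it actually removed, asserts that against the
running reference count, and returns a boolean so each caller can decide
whether a TLB flush is really needed.  The stand-alone sketch below uses
made-up names (toy_vcpu, toy_invalidate, toy_flush_tlb, NR_SLOTS) and a
stubbed flush; it is not the Xen code itself, only an illustration of the
caller-decides-to-flush idiom under those assumptions.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 16

/* Toy stand-in for the per-vcpu shadow LDT state (hypothetical). */
struct toy_vcpu {
    bool present[NR_SLOTS];   /* which slots currently have a mapping */
    unsigned int mapcnt;      /* running count, akin to shadow_ldt_mapcnt */
};

/* Stub for the TLB flush the real code performs. */
static void toy_flush_tlb(void)
{
    puts("TLB flush");
}

/*
 * Drop every mapped slot and report whether anything was dropped, so the
 * caller can skip the flush when nothing was ever faulted in.
 */
static bool toy_invalidate(struct toy_vcpu *v)
{
    unsigned int i, dropped = 0;

    for ( i = 0; i < NR_SLOTS; i++ )
    {
        if ( !v->present[i] )
            continue;

        v->present[i] = false;
        dropped++;
    }

    /* The running count must agree with what was actually removed. */
    assert(v->mapcnt == dropped);
    v->mapcnt = 0;

    return dropped;
}

int main(void)
{
    struct toy_vcpu v = { .present = { [3] = true, [7] = true }, .mapcnt = 2 };

    if ( toy_invalidate(&v) )   /* two mappings dropped -> flush */
        toy_flush_tlb();

    if ( toy_invalidate(&v) )   /* nothing left -> no flush */
        toy_flush_tlb();

    return 0;
}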
--
generated by git-patchbot for /home/xen/git/xen.git#master
