[Xen-devel] [PATCH for-next 3/3] x86/ldt: Alter how invalidate_shadow_ldt() deals with TLB flushes
Modify invalidate_shadow_ldt() to return a boolean indicating whether mappings
have been dropped, rather than taking a flush parameter. Tweak the internal
logic to be able to ASSERT() that v->arch.pv_vcpu.shadow_ldt_mapcnt matches
the number of PTEs removed.
This allows MMUEXT_SET_LDT to avoid a local TLB flush if no LDT entries had
been faulted in to begin with.
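
For reference, this is the path a PV guest exercises when loading an LDT. A
minimal, illustrative guest-side sketch follows; it is not part of the patch.
set_guest_ldt() is a hypothetical helper, HYPERVISOR_mmuext_op() is the
conventional hypercall wrapper, and the field names follow struct mmuext_op
from the public interface (xen/include/public/xen.h):

    /* Hypothetical guest-side helper: point this vcpu's LDT at 'ldt_va'
     * with 'nr_ents' entries (nr_ents == 0 clears the LDT).  With the
     * change above, Xen skips its local TLB flush unless shadow LDT
     * entries had actually been faulted in for the previous LDT. */
    static int set_guest_ldt(unsigned long ldt_va, unsigned int nr_ents)
    {
        struct mmuext_op op = {
            .cmd = MMUEXT_SET_LDT,
            .arg1.linear_addr = ldt_va,   /* must be page-aligned */
            .arg2.nr_ents = nr_ents,
        };

        return HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);
    }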
Finally, correct a comment in __get_page_type(). Under no circumstance is it
safe to forgo the TLB shootdown for GDT/LDT pages, as that would allow one
vcpu to gain a writeable mapping to a frame still mapped as a GDT/LDT by
another vcpu.
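
For context, the check which that comment describes proceeds roughly as below.
This is an illustrative paraphrase of the surrounding __get_page_type() logic,
not part of this patch; names such as domain_dirty_cpumask and
tlbflush_filter() are from the hypervisor of this era and may vary between
versions:

    /* Sketch: before the frame's new type (e.g. PGT_writable_page) takes
     * effect, flush every CPU which might still hold a stale translation.
     * tlbflush_filter() drops CPUs whose TLBs have already been flushed
     * since the page was last unmapped, per its tlbflush_timestamp. */
    struct domain *d = page_get_owner(page);
    cpumask_t *mask = this_cpu(scratch_cpumask);

    BUG_ON(in_irq());
    cpumask_copy(mask, d->domain_dirty_cpumask);
    tlbflush_filter(mask, page->tlbflush_timestamp);

    if ( unlikely(!cpumask_empty(mask)) )
        flush_tlb_mask(mask);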
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
xen/arch/x86/mm.c | 39 +++++++++++++++++++++++++--------------
1 file changed, 25 insertions(+), 14 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index d9df5ca..37de4ff 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -505,10 +505,14 @@ static inline void page_set_tlbflush_timestamp(struct page_info *page)
 const char __section(".bss.page_aligned.const") __aligned(PAGE_SIZE)
     zero_page[PAGE_SIZE];
 
-static void invalidate_shadow_ldt(struct vcpu *v, int flush)
+/*
+ * Flush the LDT, dropping any typerefs. Returns a boolean indicating whether
+ * mappings have been removed (i.e. a TLB flush is needed).
+ */
+static bool invalidate_shadow_ldt(struct vcpu *v)
 {
     l1_pgentry_t *pl1e;
-    unsigned int i;
+    unsigned int i, mappings_dropped = 0;
     struct page_info *page;
 
     BUG_ON(unlikely(in_irq()));
@@ -518,26 +522,29 @@ static void invalidate_shadow_ldt(struct vcpu *v, int flush)
     if ( v->arch.pv_vcpu.shadow_ldt_mapcnt == 0 )
         goto out;
 
-    v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
     pl1e = pv_ldt_ptes(v);
 
     for ( i = 0; i < 16; i++ )
     {
         if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) )
             continue;
+
         page = l1e_get_page(pl1e[i]);
         l1e_write(&pl1e[i], l1e_empty());
+        mappings_dropped++;
+
         ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page);
         ASSERT_PAGE_IS_DOMAIN(page, v->domain);
         put_page_and_type(page);
     }
 
-    /* Rid TLBs of stale mappings (guest mappings and shadow mappings). */
-    if ( flush )
-        flush_tlb_mask(v->vcpu_dirty_cpumask);
+    ASSERT(v->arch.pv_vcpu.shadow_ldt_mapcnt == mappings_dropped);
+    v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
 
  out:
     spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
+
+    return !!mappings_dropped;
 }
 
 
@@ -1087,7 +1094,10 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
          (l1e_owner == pg_owner) )
     {
         for_each_vcpu ( pg_owner, v )
-            invalidate_shadow_ldt(v, 1);
+        {
+            if ( invalidate_shadow_ldt(v) )
+                flush_tlb_mask(v->vcpu_dirty_cpumask);
+        }
     }
     put_page(page);
 }
@@ -2237,9 +2247,9 @@ static int __get_page_type(struct page_info *page, unsigned long type,
             if ( (x & PGT_type_mask) != type )
             {
                 /*
-                 * On type change we check to flush stale TLB entries. This
-                 * may be unnecessary (e.g., page was GDT/LDT) but those
-                 * circumstances should be very rare.
+                 * On type change we check to flush stale TLB entries. It is
+                 * vital that no other CPUs are left with mappings of a frame
+                 * which is about to become writeable to the guest.
                  */
                 cpumask_t *mask = this_cpu(scratch_cpumask);
 
@@ -2486,7 +2496,7 @@ int new_guest_cr3(mfn_t mfn)
         return rc;
     }
 
-    invalidate_shadow_ldt(curr, 0);
+    invalidate_shadow_ldt(curr); /* Unconditional TLB flush later. */
     write_ptbase(curr);
 
     return 0;
@@ -2524,7 +2534,7 @@ int new_guest_cr3(mfn_t mfn)
         return rc;
     }
 
-    invalidate_shadow_ldt(curr, 0);
+    invalidate_shadow_ldt(curr); /* Unconditional TLB flush later. */
 
     if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
         fill_ro_mpt(mfn);
@@ -3025,8 +3035,9 @@ long do_mmuext_op(
             else if ( (curr->arch.pv_vcpu.ldt_ents != ents) ||
                       (curr->arch.pv_vcpu.ldt_base != ptr) )
             {
-                invalidate_shadow_ldt(curr, 0);
-                flush_tlb_local();
+                if ( invalidate_shadow_ldt(curr) )
+                    flush_tlb_local();
+
                 curr->arch.pv_vcpu.ldt_base = ptr;
                 curr->arch.pv_vcpu.ldt_ents = ents;
                 load_LDT(curr);
--
2.1.4