[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 4/5] x86/pv: Drop support for paging out the LDT
Windows is the only OS which pages out kernel datastructures, so chances are good that this is a vestigial remnant of the PV Windows XP experiment. Furthermore, the implementation is incomplete; it only functions for a present => not-present transition, rather than a present => read/write transition. The for_each_vcpu() is one scalability limitation for PV guests, which can't reasonably be altered to be continuable. One side effect of dropping paging-out support is that now, the LDT (like the GDT) is only ever modified in current context, allowing us to drop shadow_ldt_mapcnt and shadow_ldt_lock from struct vcpu. Another side effect is that the LDT no longer automatically cleans itself up on domain destruction. Cover this by explicitly releasing the LDT frames at the same time as the GDT frames. Finally, leave some asserts around to confirm the expected behaviour of all the functions playing with PGT_seg_desc_page references. Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> --- CC: Jan Beulich <JBeulich@xxxxxxxx> --- xen/arch/x86/domain.c | 7 ++----- xen/arch/x86/mm.c | 17 ----------------- xen/arch/x86/pv/descriptor-tables.c | 20 ++++++-------------- xen/arch/x86/pv/domain.c | 2 -- xen/arch/x86/pv/mm.c | 3 --- xen/include/asm-x86/domain.h | 4 ---- 6 files changed, 8 insertions(+), 45 deletions(-) diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index da1bf1a..2b7bc5b 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -1942,11 +1942,8 @@ int domain_relinquish_resources(struct domain *d) { for_each_vcpu ( d, v ) { - /* - * Relinquish GDT mappings. No need for explicit unmapping of - * the LDT as it automatically gets squashed with the guest - * mappings. - */ + /* Relinquish GDT/LDT mappings. */
+ pv_destroy_ldt(v); pv_destroy_gdt(v); } } diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index 14cfa93..15a9334 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -1152,7 +1152,6 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner) unsigned long pfn = l1e_get_pfn(l1e); struct page_info *page; struct domain *pg_owner; - struct vcpu *v; if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || is_iomem_page(_mfn(pfn)) ) return; @@ -1188,25 +1187,9 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner) */ if ( (l1e_get_flags(l1e) & _PAGE_RW) && ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) ) - { put_page_and_type(page); - } else - { - /* We expect this is rare so we blow the entire shadow LDT. */ - if ( unlikely(((page->u.inuse.type_info & PGT_type_mask) == - PGT_seg_desc_page)) && - unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) && - (l1e_owner == pg_owner) ) - { - for_each_vcpu ( pg_owner, v ) - { - if ( pv_destroy_ldt(v) ) - flush_tlb_mask(v->vcpu_dirty_cpumask); - } - } put_page(page); - } } diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c index b418bbb..77f9851 100644 --- a/xen/arch/x86/pv/descriptor-tables.c +++ b/xen/arch/x86/pv/descriptor-tables.c @@ -37,18 +37,12 @@ */ bool pv_destroy_ldt(struct vcpu *v) { - l1_pgentry_t *pl1e; + l1_pgentry_t *pl1e = pv_ldt_ptes(v); unsigned int i, mappings_dropped = 0; struct page_info *page; ASSERT(!in_irq()); - - spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock); - - if ( v->arch.pv_vcpu.shadow_ldt_mapcnt == 0 ) - goto out; - - pl1e = pv_ldt_ptes(v); + ASSERT(v == current || cpumask_empty(v->vcpu_dirty_cpumask)); for ( i = 0; i < 16; i++ ) { @@ -64,12 +58,6 @@ bool pv_destroy_ldt(struct vcpu *v) put_page_and_type(page); } - ASSERT(v->arch.pv_vcpu.shadow_ldt_mapcnt == mappings_dropped); - v->arch.pv_vcpu.shadow_ldt_mapcnt = 0; - - out: - spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock); - return mappings_dropped; } @@ -80,6 +68,8 @@ 
void pv_destroy_gdt(struct vcpu *v) l1_pgentry_t zero_l1e = l1e_from_mfn(zero_mfn, __PAGE_HYPERVISOR_RO); unsigned int i; + ASSERT(v == current || cpumask_empty(v->vcpu_dirty_cpumask)); + v->arch.pv_vcpu.gdt_ents = 0; for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ ) { @@ -100,6 +90,8 @@ long pv_set_gdt(struct vcpu *v, unsigned long *frames, unsigned int entries) l1_pgentry_t *pl1e; unsigned int i, nr_frames = DIV_ROUND_UP(entries, 512); + ASSERT(v == current || cpumask_empty(v->vcpu_dirty_cpumask)); + if ( entries > FIRST_RESERVED_GDT_ENTRY ) return -EINVAL; diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c index 74e9e66..fbf8941 100644 --- a/xen/arch/x86/pv/domain.c +++ b/xen/arch/x86/pv/domain.c @@ -128,8 +128,6 @@ int pv_vcpu_initialise(struct vcpu *v) ASSERT(!is_idle_domain(d)); - spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock); - rc = pv_create_gdt_ldt_l1tab(v); if ( rc ) return rc; diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c index 8d7a4fd..d293724 100644 --- a/xen/arch/x86/pv/mm.c +++ b/xen/arch/x86/pv/mm.c @@ -125,10 +125,7 @@ bool pv_map_ldt_shadow_page(unsigned int offset) pl1e = &pv_ldt_ptes(curr)[offset >> PAGE_SHIFT]; l1e_add_flags(gl1e, _PAGE_RW); - spin_lock(&curr->arch.pv_vcpu.shadow_ldt_lock); l1e_write(pl1e, gl1e); - curr->arch.pv_vcpu.shadow_ldt_mapcnt++; - spin_unlock(&curr->arch.pv_vcpu.shadow_ldt_lock); return true; } diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h index 4679d54..758e030 100644 --- a/xen/include/asm-x86/domain.h +++ b/xen/include/asm-x86/domain.h @@ -491,10 +491,6 @@ struct pv_vcpu unsigned int iopl; /* Current IOPL for this VCPU, shifted left by * 12 to match the eflags register. */ - /* Current LDT details. */
- unsigned long shadow_ldt_mapcnt; - spinlock_t shadow_ldt_lock; - /* data breakpoint extension MSRs */ uint32_t dr_mask[4]; -- 2.1.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |