[Xen-changelog] [xen-unstable] x86/32: Re-factor mapcache data structure into per-domain and per-vcpu
# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1191338938 -3600
# Node ID e1b574bc36b5068baf0053da32f8ca11a907625a
# Parent  818cc26e3926614d0e9c2431c746a56bb645337d
x86/32: Re-factor mapcache data structure into per-domain and per-vcpu
components. Greatly reduces size of 'struct domain'.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c             |    4 +
 xen/arch/x86/x86_32/domain_page.c |  107 +++++++++++++++++++-------------------
 xen/include/asm-x86/domain.h      |   36 +++++++-----
 3 files changed, 79 insertions(+), 68 deletions(-)

diff -r 818cc26e3926 -r e1b574bc36b5 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue Oct 02 13:19:25 2007 +0100
+++ b/xen/arch/x86/domain.c     Tue Oct 02 16:28:58 2007 +0100
@@ -382,6 +382,8 @@ int vcpu_initialise(struct vcpu *v)
 
     v->arch.flags = TF_kernel_mode;
 
+    mapcache_vcpu_init(v);
+
     pae_l3_cache_init(&v->arch.pae_l3_cache);
 
     paging_vcpu_init(v);
@@ -461,7 +463,7 @@ int arch_domain_create(struct domain *d)
 
 #if defined(__i386__)
 
-    mapcache_init(d);
+    mapcache_domain_init(d);
 
 #else /* __x86_64__ */
 
diff -r 818cc26e3926 -r e1b574bc36b5 xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Tue Oct 02 13:19:25 2007 +0100
+++ b/xen/arch/x86/x86_32/domain_page.c Tue Oct 02 16:28:58 2007 +0100
@@ -43,9 +43,10 @@ void *map_domain_page(unsigned long mfn)
 void *map_domain_page(unsigned long mfn)
 {
     unsigned long va;
-    unsigned int idx, i, vcpu;
-    struct vcpu *v;
-    struct mapcache *cache;
+    unsigned int idx, i;
+    struct vcpu *v;
+    struct mapcache_domain *dcache;
+    struct mapcache_vcpu *vcache;
     struct vcpu_maphash_entry *hashent;
 
     ASSERT(!in_irq());
@@ -54,59 +55,59 @@ void *map_domain_page(unsigned long mfn)
 
     v = mapcache_current_vcpu();
 
-    vcpu = v->vcpu_id;
-    cache = &v->domain->arch.mapcache;
-
-    hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(mfn)];
+    dcache = &v->domain->arch.mapcache;
+    vcache = &v->arch.mapcache;
+
+    hashent = &vcache->hash[MAPHASH_HASHFN(mfn)];
     if ( hashent->mfn == mfn )
     {
         idx = hashent->idx;
         hashent->refcnt++;
         ASSERT(idx < MAPCACHE_ENTRIES);
         ASSERT(hashent->refcnt != 0);
-        ASSERT(l1e_get_pfn(cache->l1tab[idx]) == mfn);
+        ASSERT(l1e_get_pfn(dcache->l1tab[idx]) == mfn);
         goto out;
     }
 
-    spin_lock(&cache->lock);
+    spin_lock(&dcache->lock);
 
     /* Has some other CPU caused a wrap? We must flush if so. */
-    if ( unlikely(cache->epoch != cache->shadow_epoch[vcpu]) )
-    {
-        cache->shadow_epoch[vcpu] = cache->epoch;
-        if ( NEED_FLUSH(this_cpu(tlbflush_time), cache->tlbflush_timestamp) )
+    if ( unlikely(dcache->epoch != vcache->shadow_epoch) )
+    {
+        vcache->shadow_epoch = dcache->epoch;
+        if ( NEED_FLUSH(this_cpu(tlbflush_time), dcache->tlbflush_timestamp) )
         {
             perfc_incr(domain_page_tlb_flush);
             local_flush_tlb();
         }
     }
 
-    idx = find_next_zero_bit(cache->inuse, MAPCACHE_ENTRIES, cache->cursor);
+    idx = find_next_zero_bit(dcache->inuse, MAPCACHE_ENTRIES, dcache->cursor);
     if ( unlikely(idx >= MAPCACHE_ENTRIES) )
     {
         /* /First/, clean the garbage map and update the inuse list. */
-        for ( i = 0; i < ARRAY_SIZE(cache->garbage); i++ )
-        {
-            unsigned long x = xchg(&cache->garbage[i], 0);
-            cache->inuse[i] &= ~x;
+        for ( i = 0; i < ARRAY_SIZE(dcache->garbage); i++ )
+        {
+            unsigned long x = xchg(&dcache->garbage[i], 0);
+            dcache->inuse[i] &= ~x;
         }
 
         /* /Second/, flush TLBs. */
         perfc_incr(domain_page_tlb_flush);
         local_flush_tlb();
-        cache->shadow_epoch[vcpu] = ++cache->epoch;
-        cache->tlbflush_timestamp = tlbflush_current_time();
-
-        idx = find_first_zero_bit(cache->inuse, MAPCACHE_ENTRIES);
+        vcache->shadow_epoch = ++dcache->epoch;
+        dcache->tlbflush_timestamp = tlbflush_current_time();
+
+        idx = find_first_zero_bit(dcache->inuse, MAPCACHE_ENTRIES);
         BUG_ON(idx >= MAPCACHE_ENTRIES);
     }
 
-    set_bit(idx, cache->inuse);
-    cache->cursor = idx + 1;
-
-    spin_unlock(&cache->lock);
-
-    l1e_write(&cache->l1tab[idx], l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+    set_bit(idx, dcache->inuse);
+    dcache->cursor = idx + 1;
+
+    spin_unlock(&dcache->lock);
+
+    l1e_write(&dcache->l1tab[idx], l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
 
  out:
     va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
@@ -117,7 +118,7 @@ void unmap_domain_page(void *va)
 {
     unsigned int idx;
     struct vcpu *v;
-    struct mapcache *cache;
+    struct mapcache_domain *dcache;
     unsigned long mfn;
     struct vcpu_maphash_entry *hashent;
 
@@ -128,11 +129,11 @@ void unmap_domain_page(void *va)
 
     v = mapcache_current_vcpu();
 
-    cache = &v->domain->arch.mapcache;
+    dcache = &v->domain->arch.mapcache;
 
     idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
-    mfn = l1e_get_pfn(cache->l1tab[idx]);
-    hashent = &cache->vcpu_maphash[v->vcpu_id].hash[MAPHASH_HASHFN(mfn)];
+    mfn = l1e_get_pfn(dcache->l1tab[idx]);
+    hashent = &v->arch.mapcache.hash[MAPHASH_HASHFN(mfn)];
 
     if ( hashent->idx == idx )
     {
@@ -145,10 +146,10 @@ void unmap_domain_page(void *va)
         if ( hashent->idx != MAPHASHENT_NOTINUSE )
         {
             /* /First/, zap the PTE. */
-            ASSERT(l1e_get_pfn(cache->l1tab[hashent->idx]) == hashent->mfn);
-            l1e_write(&cache->l1tab[hashent->idx], l1e_empty());
+            ASSERT(l1e_get_pfn(dcache->l1tab[hashent->idx]) == hashent->mfn);
+            l1e_write(&dcache->l1tab[hashent->idx], l1e_empty());
             /* /Second/, mark as garbage. */
-            set_bit(hashent->idx, cache->garbage);
+            set_bit(hashent->idx, dcache->garbage);
         }
 
         /* Add newly-freed mapping to the maphash. */
@@ -158,30 +159,30 @@ void unmap_domain_page(void *va)
     else
     {
         /* /First/, zap the PTE. */
-        l1e_write(&cache->l1tab[idx], l1e_empty());
+        l1e_write(&dcache->l1tab[idx], l1e_empty());
         /* /Second/, mark as garbage. */
-        set_bit(idx, cache->garbage);
-    }
-}
-
-void mapcache_init(struct domain *d)
-{
-    unsigned int i, j;
-    struct vcpu_maphash_entry *hashent;
-
+        set_bit(idx, dcache->garbage);
+    }
+}
+
+void mapcache_domain_init(struct domain *d)
+{
     d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
         (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
     spin_lock_init(&d->arch.mapcache.lock);
+}
+
+void mapcache_vcpu_init(struct vcpu *v)
+{
+    unsigned int i;
+    struct vcpu_maphash_entry *hashent;
 
     /* Mark all maphash entries as not in use. */
-    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
-    {
-        for ( j = 0; j < MAPHASH_ENTRIES; j++ )
-        {
-            hashent = &d->arch.mapcache.vcpu_maphash[i].hash[j];
-            hashent->mfn = ~0UL; /* never valid to map */
-            hashent->idx = MAPHASHENT_NOTINUSE;
-        }
+    for ( i = 0; i < MAPHASH_ENTRIES; i++ )
+    {
+        hashent = &v->arch.mapcache.hash[i];
+        hashent->mfn = ~0UL; /* never valid to map */
+        hashent->idx = MAPHASHENT_NOTINUSE;
     }
 }
diff -r 818cc26e3926 -r e1b574bc36b5 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Tue Oct 02 13:19:25 2007 +0100
+++ b/xen/include/asm-x86/domain.h      Tue Oct 02 16:28:58 2007 +0100
@@ -28,17 +28,21 @@ struct trap_bounce {
 #define MAPHASH_ENTRIES 8
 #define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1))
 #define MAPHASHENT_NOTINUSE ((u16)~0U)
-struct vcpu_maphash {
+struct mapcache_vcpu {
+    /* Shadow of mapcache_domain.epoch. */
+    unsigned int shadow_epoch;
+
+    /* Lock-free per-VCPU hash of recently-used mappings. */
     struct vcpu_maphash_entry {
         unsigned long mfn;
         uint16_t      idx;
         uint16_t      refcnt;
     } hash[MAPHASH_ENTRIES];
-} __cacheline_aligned;
+};
 
 #define MAPCACHE_ORDER   10
 #define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)
-struct mapcache {
+struct mapcache_domain {
     /* The PTEs that provide the mappings, and a cursor into the array. */
     l1_pgentry_t *l1tab;
     unsigned int cursor;
@@ -47,27 +51,25 @@ struct mapcache {
     spinlock_t lock;
 
     /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
-    unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
+    unsigned int epoch;
     u32 tlbflush_timestamp;
 
     /* Which mappings are in use, and which are garbage to reap next epoch? */
     unsigned long inuse[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
     unsigned long garbage[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
-
-    /* Lock-free per-VCPU hash of recently-used mappings. */
-    struct vcpu_maphash vcpu_maphash[MAX_VIRT_CPUS];
-};
-
-extern void mapcache_init(struct domain *);
+};
+
+void mapcache_domain_init(struct domain *);
+void mapcache_vcpu_init(struct vcpu *);
 
 /* x86/64: toggle guest between kernel and user modes. */
-extern void toggle_guest_mode(struct vcpu *);
+void toggle_guest_mode(struct vcpu *);
 
 /*
  * Initialise a hypercall-transfer page. The given pointer must be mapped
  * in Xen virtual address space (accesses are not validated or checked).
  */
-extern void hypercall_page_initialise(struct domain *d, void *);
+void hypercall_page_initialise(struct domain *d, void *);
 
 /************************************************/
 /*          shadow paging extension             */
@@ -204,7 +206,7 @@ struct arch_domain
 
 #ifdef CONFIG_X86_32
     /* map_domain_page() mapping cache. */
-    struct mapcache mapcache;
+    struct mapcache_domain mapcache;
 #endif
 
 #ifdef CONFIG_COMPAT
@@ -290,7 +292,7 @@ struct arch_vcpu
     struct trap_bounce trap_bounce;
 
     /* I/O-port access bitmap. */
-    XEN_GUEST_HANDLE(uint8_t) iobmp; /* Guest kernel virtual address of the bitmap. */
+    XEN_GUEST_HANDLE(uint8_t) iobmp; /* Guest kernel vaddr of the bitmap. */
     int iobmp_limit;  /* Number of ports represented in the bitmap. */
     int iopl;         /* Current IOPL for this VCPU. */
 
@@ -327,6 +329,12 @@ struct arch_vcpu
 
     /* Guest-specified relocation of vcpu_info. */
     unsigned long vcpu_info_mfn;
+
+#ifdef CONFIG_X86_32
+    /* map_domain_page() mapping cache. */
+    struct mapcache_vcpu mapcache;
+#endif
+
 } __cacheline_aligned;
 
 /* Shorthands to improve code legibility. */
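
A note on the size claim in the commit message: before this change, every
'struct domain' embedded one cache-line-aligned maphash per *possible* VCPU
plus a shadow_epoch array, whether or not those VCPUs existed. The standalone
sketch below (not Xen code) models the old and new layouts side by side;
MAX_VIRT_CPUS = 32, a 128-byte cache line, and the old_/new_ struct names are
illustrative assumptions here, since the real values come from the Xen build
configuration.

#include <stdio.h>
#include <stdint.h>

#define MAX_VIRT_CPUS   32   /* assumption: real value is build-dependent */
#define MAPHASH_ENTRIES 8
#define CACHELINE       128  /* assumption standing in for __cacheline_aligned */

struct vcpu_maphash_entry {
    unsigned long mfn;
    uint16_t idx;
    uint16_t refcnt;
};

/* Old layout: a cache-line-aligned hash per possible VCPU, plus a shadow
 * epoch per possible VCPU, all embedded in the domain. */
struct old_domain_part {
    unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
    struct {
        struct vcpu_maphash_entry hash[MAPHASH_ENTRIES];
    } __attribute__((aligned(CACHELINE))) vcpu_maphash[MAX_VIRT_CPUS];
};

/* New layout: the domain keeps only a single epoch counter... */
struct new_domain_part {
    unsigned int epoch;
};

/* ...and each VCPU that actually exists carries its own small piece. */
struct new_vcpu_part {
    unsigned int shadow_epoch;
    struct vcpu_maphash_entry hash[MAPHASH_ENTRIES];
};

int main(void)
{
    printf("old, per domain: %zu bytes\n", sizeof(struct old_domain_part));
    printf("new, per domain: %zu bytes\n", sizeof(struct new_domain_part));
    printf("new, per vcpu:   %zu bytes\n", sizeof(struct new_vcpu_part));
    return 0;
}

Whatever the exact constants, the per-domain cost drops from "per possible
VCPU" to a single epoch counter, while each 'struct vcpu' picks up only its
own 8-entry hash, no longer padded out to a cache line.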
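The map_domain_page() hunks above also show the mapcache's deferred-flush
scheme, which the refactoring preserves: unmap_domain_page() only zaps the
PTE and sets a bit in the garbage bitmap, and the TLB flush happens in one
batch when the allocator's cursor scan wraps. Below is a toy single-CPU model
of that batching, under the simplifying assumption of byte arrays in place of
Xen's inuse/garbage bitmap operations and a printf stub for local_flush_tlb().

#include <stdio.h>

#define ENTRIES 16

static unsigned char inuse[ENTRIES];    /* stand-in for dcache->inuse */
static unsigned char garbage[ENTRIES];  /* stand-in for dcache->garbage */
static unsigned int cursor, epoch;

static void flush_tlb(void) { printf("TLB flush (epoch %u)\n", epoch); }

static int alloc_slot(void)
{
    unsigned int i;

    /* Scan forward from the cursor, like find_next_zero_bit(). */
    for ( i = cursor; i < ENTRIES; i++ )
        if ( !inuse[i] )
            goto found;

    /* Wrapped: reap all garbage in one batch, then flush just once. */
    for ( i = 0; i < ENTRIES; i++ )
        if ( garbage[i] )
            garbage[i] = inuse[i] = 0;
    epoch++;
    flush_tlb();

    for ( i = 0; i < ENTRIES; i++ )   /* like find_first_zero_bit() */
        if ( !inuse[i] )
            goto found;
    return -1;                        /* cache genuinely full */

 found:
    inuse[i] = 1;
    cursor = i + 1;
    return (int)i;
}

static void free_slot(int i)
{
    garbage[i] = 1;   /* defer the TLB flush; just mark the slot as garbage */
}

int main(void)
{
    int i, slot[ENTRIES];

    for ( i = 0; i < ENTRIES; i++ )
        slot[i] = alloc_slot();       /* fill the cache: no flushes */
    for ( i = 0; i < ENTRIES; i++ )
        free_slot(slot[i]);           /* free everything: still no flush */
    printf("slot after wrap: %d\n", alloc_slot()); /* one batched flush */
    return 0;
}

In the real code the per-vcpu shadow_epoch compared against the domain-wide
epoch is what lets the other VCPUs notice a wrap they did not perform and
flush their own TLBs before touching a recycled slot.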
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog