[Xen-changelog] Add new map_domain_page_global() interface to allow mappings
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 542cb7acb21af9704044cea6720c84e73cb165f3
# Parent f5f703ec52234f82e9ba4fe8e1aa9c5a99344d93
Add new map_domain_page_global() interface to allow mappings
that are accessible in all contexts and address spaces. Used by
shadow code and vmx code.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r f5f703ec5223 -r 542cb7acb21a xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c Tue Jan 10 17:16:30 2006
+++ b/xen/arch/x86/shadow.c Tue Jan 10 17:25:45 2006
@@ -2150,8 +2150,8 @@
     if ( max_mode & (SHM_enable | SHM_external) )
     {
         if ( likely(v->arch.guest_vtable != NULL) )
-            unmap_domain_page(v->arch.guest_vtable);
-        v->arch.guest_vtable = map_domain_page(gmfn);
+            unmap_domain_page_global(v->arch.guest_vtable);
+        v->arch.guest_vtable = map_domain_page_global(gmfn);
     }

     /*
@@ -2187,8 +2187,8 @@
          )
     {
         if ( v->arch.shadow_vtable )
-            unmap_domain_page(v->arch.shadow_vtable);
-        v->arch.shadow_vtable = map_domain_page(smfn);
+            unmap_domain_page_global(v->arch.shadow_vtable);
+        v->arch.shadow_vtable = map_domain_page_global(smfn);
     }

 #if CONFIG_PAGING_LEVELS == 2
@@ -2204,8 +2204,8 @@
         if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
             hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);
         if ( v->arch.hl2_vtable )
-            unmap_domain_page(v->arch.hl2_vtable);
-        v->arch.hl2_vtable = map_domain_page(hl2mfn);
+            unmap_domain_page_global(v->arch.hl2_vtable);
+        v->arch.hl2_vtable = map_domain_page_global(hl2mfn);
     }

     /*
diff -r f5f703ec5223 -r 542cb7acb21a xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c Tue Jan 10 17:16:30 2006
+++ b/xen/arch/x86/shadow32.c Tue Jan 10 17:25:45 2006
@@ -733,7 +733,7 @@
     ASSERT(mmfn_info != NULL);

     mmfn = page_to_pfn(mmfn_info);
-    mpl2e = (l2_pgentry_t *)map_domain_page(mmfn);
+    mpl2e = (l2_pgentry_t *)map_domain_page_global(mmfn);
     memset(mpl2e, 0, PAGE_SIZE);

     memcpy(&mpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
@@ -794,7 +794,7 @@
      * Then free monitor_table.
      */
     mfn = pagetable_get_pfn(v->arch.monitor_table);
-    unmap_domain_page(v->arch.monitor_vtable);
+    unmap_domain_page_global(v->arch.monitor_vtable);
     free_domheap_page(pfn_to_page(mfn));

     v->arch.monitor_table = mk_pagetable(0);
@@ -929,7 +929,7 @@
     if ( v->arch.guest_vtable &&
          (v->arch.guest_vtable != __linear_l2_table) )
     {
-        unmap_domain_page(v->arch.guest_vtable);
+        unmap_domain_page_global(v->arch.guest_vtable);
     }
     if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
         v->arch.guest_vtable = __linear_l2_table;
@@ -942,7 +942,7 @@
     if ( v->arch.shadow_vtable &&
          (v->arch.shadow_vtable != __shadow_linear_l2_table) )
     {
-        unmap_domain_page(v->arch.shadow_vtable);
+        unmap_domain_page_global(v->arch.shadow_vtable);
     }
     if ( !(mode & SHM_external) )
         v->arch.shadow_vtable = __shadow_linear_l2_table;
@@ -955,7 +955,7 @@
     if ( v->arch.hl2_vtable &&
          (v->arch.hl2_vtable != __linear_hl2_table) )
     {
-        unmap_domain_page(v->arch.hl2_vtable);
+        unmap_domain_page_global(v->arch.hl2_vtable);
     }
     if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
         v->arch.hl2_vtable = __linear_hl2_table;
@@ -2906,8 +2906,8 @@
     if ( max_mode & (SHM_enable | SHM_external) )
     {
         if ( likely(v->arch.guest_vtable != NULL) )
-            unmap_domain_page(v->arch.guest_vtable);
-        v->arch.guest_vtable = map_domain_page(gmfn);
+            unmap_domain_page_global(v->arch.guest_vtable);
+        v->arch.guest_vtable = map_domain_page_global(gmfn);
     }

     /*
@@ -2932,8 +2932,8 @@
     if ( max_mode == SHM_external )
     {
         if ( v->arch.shadow_vtable )
-            unmap_domain_page(v->arch.shadow_vtable);
-        v->arch.shadow_vtable = map_domain_page(smfn);
+            unmap_domain_page_global(v->arch.shadow_vtable);
+        v->arch.shadow_vtable = map_domain_page_global(smfn);
     }

     /*
@@ -2948,8 +2948,8 @@
         if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
             hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);
         if ( v->arch.hl2_vtable )
-            unmap_domain_page(v->arch.hl2_vtable);
-        v->arch.hl2_vtable = map_domain_page(hl2mfn);
+            unmap_domain_page_global(v->arch.hl2_vtable);
+        v->arch.hl2_vtable = map_domain_page_global(hl2mfn);
     }

     /*
diff -r f5f703ec5223 -r 542cb7acb21a xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c Tue Jan 10 17:16:30 2006
+++ b/xen/arch/x86/shadow_public.c Tue Jan 10 17:25:45 2006
@@ -151,6 +151,8 @@

     for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
         put_page_from_l1e(pl1e[i], d);
+
+    unmap_domain_page(pl1e);
 }

 /*
@@ -254,6 +256,7 @@
     pae_l3 = map_domain_page(pagetable_get_pfn(d->arch.phys_table));
     for (i = 0; i < PDP_ENTRIES; i++)
         l3[i] = l3e_from_pfn(l3e_get_pfn(pae_l3[i]), __PAGE_HYPERVISOR);
+    unmap_domain_page(pae_l3);

     unmap_domain_page(l4);
     unmap_domain_page(l3);
@@ -275,7 +278,7 @@
     ASSERT( mmfn_info );

     mmfn = page_to_pfn(mmfn_info);
-    mpl4e = (l4_pgentry_t *) map_domain_page(mmfn);
+    mpl4e = (l4_pgentry_t *) map_domain_page_global(mmfn);
     memcpy(mpl4e, &idle_pg_table[0], PAGE_SIZE);
     mpl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
@@ -298,7 +301,7 @@
      * free monitor_table.
      */
     mfn = pagetable_get_pfn(v->arch.monitor_table);
-    unmap_domain_page(v->arch.monitor_vtable);
+    unmap_domain_page_global(v->arch.monitor_vtable);
     free_domheap_page(pfn_to_page(mfn));

     v->arch.monitor_table = mk_pagetable(0);
@@ -332,7 +335,7 @@
     ASSERT(mmfn_info != NULL);

     mmfn = page_to_pfn(mmfn_info);
-    mpl2e = (l2_pgentry_t *)map_domain_page(mmfn);
+    mpl2e = (l2_pgentry_t *)map_domain_page_global(mmfn);
     memset(mpl2e, 0, PAGE_SIZE);

     memcpy(&mpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
@@ -393,7 +396,7 @@
      * Then free monitor_table.
      */
     mfn = pagetable_get_pfn(v->arch.monitor_table);
-    unmap_domain_page(v->arch.monitor_vtable);
+    unmap_domain_page_global(v->arch.monitor_vtable);
     free_domheap_page(pfn_to_page(mfn));

     v->arch.monitor_table = mk_pagetable(0);
@@ -977,7 +980,7 @@
     if ( v->arch.guest_vtable &&
          (v->arch.guest_vtable != __linear_l2_table) )
     {
-        unmap_domain_page(v->arch.guest_vtable);
+        unmap_domain_page_global(v->arch.guest_vtable);
     }
     if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
         v->arch.guest_vtable = __linear_l2_table;
@@ -990,7 +993,7 @@
     if ( v->arch.shadow_vtable &&
          (v->arch.shadow_vtable != __shadow_linear_l2_table) )
     {
-        unmap_domain_page(v->arch.shadow_vtable);
+        unmap_domain_page_global(v->arch.shadow_vtable);
     }
     if ( !(mode & SHM_external) && d->arch.ops->guest_paging_levels == 2)
         v->arch.shadow_vtable = __shadow_linear_l2_table;
@@ -1004,7 +1007,7 @@
     if ( v->arch.hl2_vtable &&
          (v->arch.hl2_vtable != __linear_hl2_table) )
     {
-        unmap_domain_page(v->arch.hl2_vtable);
+        unmap_domain_page_global(v->arch.hl2_vtable);
     }
     if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
         v->arch.hl2_vtable = __linear_hl2_table;
diff -r f5f703ec5223 -r 542cb7acb21a xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c Tue Jan 10 17:16:30 2006
+++ b/xen/arch/x86/vmx.c Tue Jan 10 17:25:45 2006
@@ -98,7 +98,8 @@
         /* unmap IO shared page */
         struct domain *d = v->domain;
         if ( d->arch.vmx_platform.shared_page_va )
-            unmap_domain_page((void *)d->arch.vmx_platform.shared_page_va);
+            unmap_domain_page_global(
+                (void *)d->arch.vmx_platform.shared_page_va);
     }

     destroy_vmcs(&v->arch.arch_vmx);
diff -r f5f703ec5223 -r 542cb7acb21a xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c Tue Jan 10 17:16:30 2006
+++ b/xen/arch/x86/vmx_vmcs.c Tue Jan 10 17:25:45 2006
@@ -193,7 +193,7 @@
         domain_crash_synchronous();
     }

-    p = map_domain_page(mpfn);
+    p = map_domain_page_global(mpfn);
     if (p == NULL) {
         printk("Can not map io request shared page for VMX domain.\n");
         domain_crash_synchronous();
diff -r f5f703ec5223 -r 542cb7acb21a xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Tue Jan 10 17:16:30 2006
+++ b/xen/arch/x86/x86_32/domain_page.c Tue Jan 10 17:25:45 2006
@@ -101,3 +101,71 @@
     for ( i = 0; i < (1U << order); i++ )
         l1e_add_flags(cache->l1tab[idx+i], READY_FOR_TLB_FLUSH);
 }
+
+#define GLOBALMAP_BITS (IOREMAP_MBYTES << (20 - PAGE_SHIFT))
+static unsigned long inuse[BITS_TO_LONGS(GLOBALMAP_BITS)];
+static unsigned long garbage[BITS_TO_LONGS(GLOBALMAP_BITS)];
+static unsigned int inuse_cursor;
+static spinlock_t globalmap_lock = SPIN_LOCK_UNLOCKED;
+
+void *map_domain_page_global(unsigned long pfn)
+{
+    l2_pgentry_t *pl2e;
+    l1_pgentry_t *pl1e;
+    unsigned int idx, i;
+    unsigned long va;
+
+    ASSERT(!in_irq() && local_irq_is_enabled());
+
+    spin_lock(&globalmap_lock);
+
+    for ( ; ; )
+    {
+        idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
+        va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
+
+        /* End of round? If not then we're done in this loop. */
+        if ( va < FIXADDR_START )
+            break;
+
+        /* /First/, clean the garbage map and update the inuse list. */
+        for ( i = 0; i < ARRAY_SIZE(garbage); i++ )
+        {
+            unsigned long x = xchg(&garbage[i], 0);
+            inuse[i] &= ~x;
+        }
+
+        /* /Second/, flush all TLBs to get rid of stale garbage mappings. */
+        flush_tlb_all();
+
+        inuse_cursor = 0;
+    }
+
+    set_bit(idx, inuse);
+    inuse_cursor = idx + 1;
+
+    spin_unlock(&globalmap_lock);
+
+    pl2e = virt_to_xen_l2e(va);
+    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
+    *pl1e = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
+
+    return (void *)va;
+}
+
+void unmap_domain_page_global(void *va)
+{
+    unsigned long __va = (unsigned long)va;
+    l2_pgentry_t *pl2e;
+    l1_pgentry_t *pl1e;
+    unsigned int idx;
+
+    /* /First/, we zap the PTE. */
+    pl2e = virt_to_xen_l2e(__va);
+    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
+    *pl1e = l1e_empty();
+
+    /* /Second/, we add to the garbage map. */
+    idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
+    set_bit(idx, garbage);
+}
diff -r f5f703ec5223 -r 542cb7acb21a xen/include/xen/domain_page.h
--- a/xen/include/xen/domain_page.h Tue Jan 10 17:16:30 2006
+++ b/xen/include/xen/domain_page.h Tue Jan 10 17:25:45 2006
@@ -17,15 +17,25 @@

 /*
  * Maps a given range of page frames, returning the mapped virtual address. The
- * pages are now accessible until a corresponding call to unmap_domain_page().
+ * pages are now accessible within the current domain until a corresponding
+ * call to unmap_domain_page().
  */
 extern void *map_domain_pages(unsigned long pfn, unsigned int order);

 /*
- * Pass a VA within the first page of a range previously mapped with
- * map_omain_pages(). Those pages will then be removed from the mapping lists.
+ * Pass a VA within the first page of a range previously mapped in the context
+ * of the currently-executing domain via a call to map_domain_pages(). Those
+ * pages will then be removed from the mapping lists.
  */
 extern void unmap_domain_pages(void *va, unsigned int order);
+
+/*
+ * Similar to the above calls, except the mapping is accessible in all
+ * address spaces (not just within the domain that created the mapping). Global
+ * mappings can also be unmapped from any context.
+ */
+extern void *map_domain_page_global(unsigned long pfn);
+extern void unmap_domain_page_global(void *va);

 #define DMCACHE_ENTRY_VALID 1U
 #define DMCACHE_ENTRY_HELD 2U
@@ -90,6 +100,9 @@
 #define map_domain_pages(pfn,order) phys_to_virt((pfn)<<PAGE_SHIFT)
 #define unmap_domain_pages(va,order) ((void)((void)(va),(void)(order)))

+#define map_domain_page_global(pfn) phys_to_virt((pfn)<<PAGE_SHIFT)
+#define unmap_domain_page_global(va) ((void)(va))
+
 struct domain_mmap_cache {
 };
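For orientation, the caller-side pattern this changeset introduces is small: mappings that must outlive the current context, which the shadow and VMX code previously created with map_domain_page() and released with unmap_domain_page(), now use the _global variants, so the returned pointer is valid in all address spaces and can be unmapped from any context, as the new comment in domain_page.h states. Below is a minimal sketch of that pattern, assuming Xen's <xen/domain_page.h>; cache_frame() and cached_va are hypothetical names used only for illustration and do not appear in the patch.

/*
 * Sketch only, not taken from the changeset. Assumes Xen's
 * <xen/domain_page.h>; cache_frame() and cached_va are hypothetical.
 */
#include <xen/domain_page.h>

static void *cached_va;  /* pointer stays valid in all address spaces */

void cache_frame(unsigned long pfn)
{
    if ( cached_va != NULL )
        unmap_domain_page_global(cached_va);  /* legal from any context */
    cached_va = map_domain_page_global(pfn);  /* lives until unmapped */
}

This mirrors the guest_vtable/shadow_vtable/hl2_vtable handling in the shadow hunks above; per the updated header comments, an ordinary mapping is only accessible within the domain that created it, so a pointer that is shared across contexts needs the _global call.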
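On x86_32 the new calls are backed by the allocator added to domain_page.c above: page slots in the IOREMAP area are tracked in an inuse bitmap scanned with a rolling cursor, unmap_domain_page_global() only zaps the PTE and marks the slot in a garbage bitmap, and garbage is folded back into inuse, with a single flush_tlb_all(), only when the cursor reaches the end of the area. The user-space sketch below models just that recycling policy; SLOTS, alloc_slot(), free_slot() and the printf are invented stand-ins (for GLOBALMAP_BITS, the allocation loop, the unmap path and flush_tlb_all() respectively), and the locking and page-table updates of the real code are omitted.

/*
 * User-space model of the slot-recycling policy only; this is not the
 * Xen code. All names here are invented for illustration.
 */
#include <stdio.h>

#define SLOTS 16                      /* stand-in for GLOBALMAP_BITS */

static unsigned char inuse[SLOTS];    /* 1 = slot holds a live mapping */
static unsigned char garbage[SLOTS];  /* 1 = slot freed since last flush */
static unsigned int cursor;           /* stand-in for inuse_cursor */

static int alloc_slot(void)
{
    for ( ; ; )
    {
        unsigned int idx;

        /* Scan forward from the cursor for a free slot. */
        for ( idx = cursor; idx < SLOTS; idx++ )
        {
            if ( !inuse[idx] )
            {
                inuse[idx] = 1;
                cursor = idx + 1;
                return (int)idx;
            }
        }

        /* End of round: reclaim garbage slots, then "flush TLBs" once. */
        for ( idx = 0; idx < SLOTS; idx++ )
        {
            if ( garbage[idx] )
                inuse[idx] = garbage[idx] = 0;
        }
        printf("flush_tlb_all()\n");
        cursor = 0;
    }
}

static void free_slot(int idx)
{
    /* Defer reuse: the slot becomes allocatable only after the next flush. */
    garbage[idx] = 1;
}

int main(void)
{
    int i, first, second;

    for ( i = 0; i < SLOTS; i++ )
        (void)alloc_slot();           /* fill the whole area */

    free_slot(3);
    free_slot(7);

    first  = alloc_slot();            /* wraps: reclaim + one flush, returns 3 */
    second = alloc_slot();            /* returns 7 */
    printf("recycled slots %d and %d\n", first, second);
    return 0;
}

The design choice this models is the deferred flush: rather than paying for a global TLB flush on every unmap, stale translations are tolerated until the allocator wraps around, at which point one flush_tlb_all() makes every garbage slot safe to reuse. Like the real loop, the model simply keeps retrying if the area is full and nothing has been freed.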