[Xen-devel] [PATCH] x86: make map_domain_page_global() a simple wrapper around vmap()
This is in order to reduce the number of fundamental mapping mechanisms
as well as to reduce the amount of code to be maintained. In the course
of this, the virtual space available to vmap() is grown from 16GB to
64GB.

Note that this requires callers of unmap_domain_page_global() to no
longer pass misaligned pointers: map_domain_page_global() returns
page-aligned pointers, so unmapping should be done accordingly.
unmap_vcpu_info() violated this and is being adjusted here.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -11,6 +11,7 @@
 #include <xen/perfc.h>
 #include <xen/pfn.h>
 #include <xen/sched.h>
+#include <xen/vmap.h>
 #include <asm/current.h>
 #include <asm/flushtlb.h>
 #include <asm/hardirq.h>
@@ -310,18 +311,8 @@ int mapcache_vcpu_init(struct vcpu *v)
     return 0;
 }
 
-#define GLOBALMAP_BITS (GLOBALMAP_GBYTES << (30 - PAGE_SHIFT))
-static unsigned long inuse[BITS_TO_LONGS(GLOBALMAP_BITS)];
-static unsigned long garbage[BITS_TO_LONGS(GLOBALMAP_BITS)];
-static unsigned int inuse_cursor;
-static DEFINE_SPINLOCK(globalmap_lock);
-
 void *map_domain_page_global(unsigned long mfn)
 {
-    l1_pgentry_t *pl1e;
-    unsigned int idx, i;
-    unsigned long va;
-
     ASSERT(!in_irq() && local_irq_is_enabled());
 
 #ifdef NDEBUG
@@ -329,59 +320,19 @@ void *map_domain_page_global(unsigned lo
         return mfn_to_virt(mfn);
 #endif
 
-    spin_lock(&globalmap_lock);
-
-    idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
-    va = GLOBALMAP_VIRT_START + pfn_to_paddr(idx);
-    if ( unlikely(va >= GLOBALMAP_VIRT_END) )
-    {
-        /* /First/, clean the garbage map and update the inuse list. */
-        for ( i = 0; i < ARRAY_SIZE(garbage); i++ )
-            inuse[i] &= ~xchg(&garbage[i], 0);
-
-        /* /Second/, flush all TLBs to get rid of stale garbage mappings. */
-        flush_tlb_all();
-
-        idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
-        va = GLOBALMAP_VIRT_START + pfn_to_paddr(idx);
-        if ( unlikely(va >= GLOBALMAP_VIRT_END) )
-        {
-            spin_unlock(&globalmap_lock);
-            return NULL;
-        }
-    }
-
-    set_bit(idx, inuse);
-    inuse_cursor = idx + 1;
-
-    pl1e = virt_to_xen_l1e(va);
-
-    spin_unlock(&globalmap_lock);
-
-    if ( !pl1e )
-        return NULL;
-    l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
-
-    return (void *)va;
+    return vmap(&mfn, 1);
 }
 
 void unmap_domain_page_global(const void *ptr)
 {
     unsigned long va = (unsigned long)ptr;
-    l1_pgentry_t *pl1e;
 
     if ( va >= DIRECTMAP_VIRT_START )
         return;
 
-    ASSERT(va >= GLOBALMAP_VIRT_START && va < GLOBALMAP_VIRT_END);
-
-    /* /First/, we zap the PTE. */
-    pl1e = virt_to_xen_l1e(va);
-    BUG_ON(!pl1e);
-    l1e_write(pl1e, l1e_empty());
+    ASSERT(va >= VMAP_VIRT_START && va < VMAP_VIRT_END);
 
-    /* /Second/, we add to the garbage map. */
-    set_bit(PFN_DOWN(va - GLOBALMAP_VIRT_START), garbage);
+    vunmap(ptr);
 }
 
 /* Translate a map-domain-page'd address to the underlying MFN */
@@ -393,7 +344,7 @@ unsigned long domain_page_map_to_mfn(con
     if ( va >= DIRECTMAP_VIRT_START )
         return virt_to_mfn(ptr);
 
-    if ( va >= GLOBALMAP_VIRT_START && va < GLOBALMAP_VIRT_END )
+    if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
     {
         pl1e = virt_to_xen_l1e(va);
         BUG_ON(!pl1e);
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -958,7 +958,8 @@ void unmap_vcpu_info(struct vcpu *v)
         return;
 
     mfn = v->vcpu_info_mfn;
-    unmap_domain_page_global(v->vcpu_info);
+    unmap_domain_page_global((void *)
+                             ((unsigned long)v->vcpu_info & PAGE_MASK));
 
     v->vcpu_info = &dummy_vcpu_info;
     v->vcpu_info_mfn = INVALID_MFN;
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -146,17 +146,15 @@ extern unsigned char boot_edid_info[128]
  *    Per-domain mappings (e.g., GDT, LDT).
  *  0xffff828000000000 - 0xffff82bfffffffff [256GB, 2^38 bytes, PML4:261]
  *    Machine-to-phys translation table.
- *  0xffff82c000000000 - 0xffff82c3ffffffff [16GB,  2^34 bytes, PML4:261]
+ *  0xffff82c000000000 - 0xffff82cfffffffff [64GB,  2^36 bytes, PML4:261]
  *    vmap()/ioremap()/fixmap area.
- *  0xffff82c400000000 - 0xffff82c43fffffff [1GB,   2^30 bytes, PML4:261]
- *    Global domain page map area.
- *  0xffff82c440000000 - 0xffff82c47fffffff [1GB,   2^30 bytes, PML4:261]
+ *  0xffff82d000000000 - 0xffff82d03fffffff [1GB,   2^30 bytes, PML4:261]
  *    Compatibility machine-to-phys translation table.
- *  0xffff82c480000000 - 0xffff82c4bfffffff [1GB,   2^30 bytes, PML4:261]
+ *  0xffff82d040000000 - 0xffff82d07fffffff [1GB,   2^30 bytes, PML4:261]
  *    High read-only compatibility machine-to-phys translation table.
- *  0xffff82c4c0000000 - 0xffff82c4ffffffff [1GB,   2^30 bytes, PML4:261]
+ *  0xffff82d080000000 - 0xffff82d0bfffffff [1GB,   2^30 bytes, PML4:261]
  *    Xen text, static data, bss.
- *  0xffff82c500000000 - 0xffff82dffbffffff [108GB - 64MB,      PML4:261]
+ *  0xffff82d0c0000000 - 0xffff82dffbffffff [61GB - 64MB,       PML4:261]
  *    Reserved for future use.
  *  0xffff82dffc000000 - 0xffff82dfffffffff [64MB,  2^26 bytes, PML4:261]
  *    Super-page information array.
@@ -220,15 +218,11 @@ extern unsigned char boot_edid_info[128]
 /* Slot 261: machine-to-phys conversion table (256GB). */
 #define RDWR_MPT_VIRT_START     (PML4_ADDR(261))
 #define RDWR_MPT_VIRT_END       (RDWR_MPT_VIRT_START + MPT_VIRT_SIZE)
-/* Slot 261: vmap()/ioremap()/fixmap area (16GB). */
+/* Slot 261: vmap()/ioremap()/fixmap area (64GB). */
 #define VMAP_VIRT_START         RDWR_MPT_VIRT_END
-#define VMAP_VIRT_END           (VMAP_VIRT_START + GB(16))
-/* Slot 261: global domain page map area (1GB). */
-#define GLOBALMAP_GBYTES        1
-#define GLOBALMAP_VIRT_START    VMAP_VIRT_END
-#define GLOBALMAP_VIRT_END      (GLOBALMAP_VIRT_START + (GLOBALMAP_GBYTES<<30))
+#define VMAP_VIRT_END           (VMAP_VIRT_START + GB(64))
 /* Slot 261: compatibility machine-to-phys conversion table (1GB). */
-#define RDWR_COMPAT_MPT_VIRT_START GLOBALMAP_VIRT_END
+#define RDWR_COMPAT_MPT_VIRT_START VMAP_VIRT_END
 #define RDWR_COMPAT_MPT_VIRT_END   (RDWR_COMPAT_MPT_VIRT_START + GB(1))
 /* Slot 261: high read-only compat machine-to-phys conversion table (1GB). */
 #define HIRO_COMPAT_MPT_VIRT_START RDWR_COMPAT_MPT_VIRT_END
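For illustration only (not part of the patch): the alignment contract
described above means a caller holding an interior pointer into a globally
mapped page must mask back down to the page boundary before unmapping, just
as the unmap_vcpu_info() adjustment does. A minimal sketch; example_use()
and offs are invented names, the rest is the interface as changed here:

static void example_use(unsigned long mfn, unsigned int offs)
{
    /* map_domain_page_global() now returns vmap()'s page aligned address. */
    char *va = map_domain_page_global(mfn);
    char *p;

    if ( !va )
        return;

    p = va + offs;     /* interior pointer (offs < PAGE_SIZE) - fine to use */

    /* ... access *p ... */

    /*
     * unmap_domain_page_global() is now a thin wrapper around vunmap(),
     * which expects the address vmap() handed out - strip the offset
     * before unmapping.
     */
    unmap_domain_page_global((void *)((unsigned long)p & PAGE_MASK));
}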
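As a sanity check on the reshuffled slot 261 layout (pure arithmetic from
the constants in the config.h hunk above):

    VMAP_VIRT_START            = 0xffff82c000000000
    VMAP_VIRT_END              = VMAP_VIRT_START + GB(64)
                               = 0xffff82c000000000 + 0x1000000000
                               = 0xffff82d000000000
    RDWR_COMPAT_MPT_VIRT_START = VMAP_VIRT_END                  (1GB)
    HIRO_COMPAT_MPT_VIRT_START = 0xffff82d040000000             (1GB)
    Xen text, static data, bss = 0xffff82d080000000             (1GB)
    reserved                   = 0xffff82d0c0000000 onwards     (61GB - 64MB)

That is, everything beyond the vmap() area moves up by 47GB: 48GB of vmap()
growth minus the 1GB the global domain page map used to occupy.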
Attachment: x86-mdpg-vmap.patch