[Xen-changelog] [xen-unstable] miscellaneous cleanup
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1358416594 -3600
# Node ID 431bc26eda5b4d5d5a22807750f7b328e6197e71
# Parent  d9c7b82aa7b11fac96d8cbafbd9c9ab6de4a11a9
miscellaneous cleanup

... noticed while putting together the 16Tb support patches for x86.

Briefly, this (in order of the changes below)
- fixes an inefficiency in x86's context switch code (translations
  to/from struct page are more involved than to/from MFNs; a sketch of
  why follows the sign-offs)
- drops unnecessary MFN-to-page conversions
- drops a redundant call to destroy_xen_mappings() (an identical call
  is being made a few lines up)
- simplifies a VA-to-MFN translation
- drops dead code (several occurrences)
- adds a missing __init annotation

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
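To make the first point concrete, here is a minimal, self-contained
model of the two translation paths. Everything in it (the constants,
the toy frame table, the function bodies) is an illustrative stand-in
rather than Xen's real code -- the real virt_to_page() additionally
goes through PDX compression, so the gap is wider still:

#include <stdio.h>

#define PAGE_SHIFT           12
#define DIRECTMAP_VIRT_START 0xffff830000000000UL  /* illustrative */

struct page_info { unsigned long fields[4]; };     /* toy entry */
static struct page_info frame_table[256];          /* toy frame table */

/* virt -> MFN: one subtraction and one shift. */
static unsigned long virt_to_mfn(unsigned long va)
{
    return (va - DIRECTMAP_VIRT_START) >> PAGE_SHIFT;
}

/* virt -> struct page: the same arithmetic *plus* a table index. */
static struct page_info *virt_to_page(unsigned long va)
{
    return frame_table + virt_to_mfn(va);
}

/* page -> MFN: a pointer subtraction, i.e. a divide by sizeof(*pg). */
static unsigned long page_to_mfn(const struct page_info *pg)
{
    return (unsigned long)(pg - frame_table);
}

int main(void)
{
    unsigned long gdt = DIRECTMAP_VIRT_START + (5UL << PAGE_SHIFT);

    /* Old shape: virt -> page, then l1e_from_page() converts back. */
    printf("via struct page: mfn %lu\n",
           page_to_mfn(virt_to_page(gdt) + 1));
    /* New shape: translate once; each loop iteration is just mfn + i. */
    printf("direct:          mfn %lu\n", virt_to_mfn(gdt) + 1);
    return 0;
}

Both paths print the same MFN; the second gets there without touching
the frame table at all, which is exactly what the domain.c hunk below
switches to.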
---
diff -r d9c7b82aa7b1 -r 431bc26eda5b xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c	Thu Jan 17 10:55:00 2013 +0100
+++ b/xen/arch/x86/domain.c	Thu Jan 17 10:56:34 2013 +0100
@@ -1512,12 +1512,12 @@ static void __context_switch(void)
             per_cpu(compat_gdt_table, cpu);
     if ( need_full_gdt(n) )
     {
-        struct page_info *page = virt_to_page(gdt);
+        unsigned long mfn = virt_to_mfn(gdt);
         unsigned int i;
         for ( i = 0; i < NR_RESERVED_GDT_PAGES; i++ )
             l1e_write(n->arch.perdomain_ptes +
                       FIRST_RESERVED_GDT_PAGE + i,
-                      l1e_from_page(page + i, __PAGE_HYPERVISOR));
+                      l1e_from_pfn(mfn + i, __PAGE_HYPERVISOR));
     }
 
     if ( need_full_gdt(p) &&
diff -r d9c7b82aa7b1 -r 431bc26eda5b xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c	Thu Jan 17 10:55:00 2013 +0100
+++ b/xen/arch/x86/x86_64/mm.c	Thu Jan 17 10:56:34 2013 +0100
@@ -422,11 +422,10 @@ void destroy_m2p_mapping(struct mem_hota
  */
 static int setup_compat_m2p_table(struct mem_hotadd_info *info)
 {
-    unsigned long i, va, smap, emap, rwva, epfn = info->epfn;
+    unsigned long i, va, smap, emap, rwva, epfn = info->epfn, mfn;
     unsigned int n;
     l3_pgentry_t *l3_ro_mpt = NULL;
     l2_pgentry_t *l2_ro_mpt = NULL;
-    struct page_info *l1_pg;
     int err = 0;
 
     smap = info->spfn & (~((1UL << (L2_PAGETABLE_SHIFT - 2)) -1));
@@ -475,16 +474,16 @@ static int setup_compat_m2p_table(struct
         if ( n == CNT )
             continue;
 
-        l1_pg = mfn_to_page(alloc_hotadd_mfn(info));
-        err = map_pages_to_xen(rwva, page_to_mfn(l1_pg),
-                               1UL << PAGETABLE_ORDER,
+        mfn = alloc_hotadd_mfn(info);
+        err = map_pages_to_xen(rwva, mfn, 1UL << PAGETABLE_ORDER,
                                PAGE_HYPERVISOR);
         if ( err )
             break;
         /* Fill with INVALID_M2P_ENTRY. */
         memset((void *)rwva, 0xFF, 1UL << L2_PAGETABLE_SHIFT);
         /* NB. Cannot be GLOBAL as the ptes get copied into per-VM space. */
-        l2e_write(&l2_ro_mpt[l2_table_offset(va)], l2e_from_page(l1_pg, _PAGE_PSE|_PAGE_PRESENT));
+        l2e_write(&l2_ro_mpt[l2_table_offset(va)],
+                  l2e_from_pfn(mfn, _PAGE_PSE|_PAGE_PRESENT));
     }
 #undef CNT
 #undef MFN
@@ -501,7 +500,7 @@ static int setup_m2p_table(struct mem_ho
     unsigned int n, memflags;
     l2_pgentry_t *l2_ro_mpt = NULL;
     l3_pgentry_t *l3_ro_mpt = NULL;
-    struct page_info *l1_pg, *l2_pg;
+    struct page_info *l2_pg;
     int ret = 0;
 
     ASSERT(l4e_get_flags(idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)])
@@ -544,15 +543,13 @@ static int setup_m2p_table(struct mem_ho
             for ( n = 0; n < CNT; ++n)
                 if ( mfn_valid(i + n * PDX_GROUP_COUNT) )
                     break;
-            if ( n == CNT )
-                l1_pg = NULL;
-            else
+            if ( n < CNT )
             {
-                l1_pg = mfn_to_page(alloc_hotadd_mfn(info));
+                unsigned long mfn = alloc_hotadd_mfn(info);
+
                 ret = map_pages_to_xen(
                             RDWR_MPT_VIRT_START + i * sizeof(unsigned long),
-                            page_to_mfn(l1_pg),
-                            1UL << PAGETABLE_ORDER,
+                            mfn, 1UL << PAGETABLE_ORDER,
                             PAGE_HYPERVISOR);
                 if ( ret )
                     goto error;
@@ -584,7 +581,7 @@ static int setup_m2p_table(struct mem_ho
             }
 
             /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
-            l2e_write(l2_ro_mpt, l2e_from_page(l1_pg,
+            l2e_write(l2_ro_mpt, l2e_from_pfn(mfn,
                       /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT));
         }
         if ( !((unsigned long)l2_ro_mpt & ~PAGE_MASK) )
@@ -1548,8 +1545,6 @@ destroy_frametable:
     NODE_DATA(node)->node_start_pfn = old_node_start;
     NODE_DATA(node)->node_spanned_pages = old_node_span;
 
-    destroy_xen_mappings((unsigned long)mfn_to_virt(spfn),
-                         (unsigned long)mfn_to_virt(epfn));
     return ret;
 }
diff -r d9c7b82aa7b1 -r 431bc26eda5b xen/common/domctl.c
--- a/xen/common/domctl.c	Thu Jan 17 10:55:00 2013 +0100
+++ b/xen/common/domctl.c	Thu Jan 17 10:56:34 2013 +0100
@@ -156,7 +156,7 @@ void getdomaininfo(struct domain *d, str
     info->max_pages = d->max_pages;
     info->shr_pages = atomic_read(&d->shr_pages);
     info->paged_pages = atomic_read(&d->paged_pages);
-    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);
+    info->shared_info_frame = mfn_to_gmfn(d, virt_to_mfn(d->shared_info));
     BUG_ON(SHARED_M2P(info->shared_info_frame));
 
     info->cpupool = d->cpupool ? d->cpupool->cpupool_id : CPUPOOLID_NONE;
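The domctl.c change just above is a pure simplification: with the
usual direct-map definitions, virt_to_mfn(va) is by construction the
same value as __pa(va) >> PAGE_SHIFT, just spelled in one step. A tiny
sketch, using simplified stand-ins rather than Xen's actual macros:

#include <assert.h>

#define PAGE_SHIFT           12
#define DIRECTMAP_VIRT_START 0xffff830000000000UL  /* illustrative */

/* virt -> physical address: drop the direct-map offset (stand-in). */
static unsigned long __pa(unsigned long va)
{
    return va - DIRECTMAP_VIRT_START;
}

/* virt -> machine frame number: the same subtraction, then a shift. */
static unsigned long virt_to_mfn(unsigned long va)
{
    return __pa(va) >> PAGE_SHIFT;
}

int main(void)
{
    unsigned long shared_info = DIRECTMAP_VIRT_START + 0x42000UL;

    /* The old and new expressions in getdomaininfo() agree. */
    assert((__pa(shared_info) >> PAGE_SHIFT) == virt_to_mfn(shared_info));
    return 0;
}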
diff -r d9c7b82aa7b1 -r 431bc26eda5b xen/include/asm-arm/mm.h
--- a/xen/include/asm-arm/mm.h	Thu Jan 17 10:55:00 2013 +0100
+++ b/xen/include/asm-arm/mm.h	Thu Jan 17 10:56:34 2013 +0100
@@ -121,7 +121,6 @@ extern unsigned long xenheap_virt_end;
 
 #define page_set_owner(_p,_d) ((_p)->v.inuse.domain = (_d))
 #define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))
-#define vaddr_get_owner(va)   (page_get_owner(virt_to_page((va))))
 
 #define XENSHARE_writable 0
 #define XENSHARE_readonly 1
diff -r d9c7b82aa7b1 -r 431bc26eda5b xen/include/asm-x86/config.h
--- a/xen/include/asm-x86/config.h	Thu Jan 17 10:55:00 2013 +0100
+++ b/xen/include/asm-x86/config.h	Thu Jan 17 10:56:34 2013 +0100
@@ -275,12 +275,8 @@ extern unsigned long xen_phys_start;
 
 /* GDT/LDT shadow mapping area. The first per-domain-mapping sub-area. */
 #define GDT_LDT_VCPU_SHIFT       5
 #define GDT_LDT_VCPU_VA_SHIFT    (GDT_LDT_VCPU_SHIFT + PAGE_SHIFT)
-#ifdef MAX_VIRT_CPUS
-#define GDT_LDT_MBYTES           (MAX_VIRT_CPUS >> (20-GDT_LDT_VCPU_VA_SHIFT))
-#else
 #define GDT_LDT_MBYTES           PERDOMAIN_MBYTES
 #define MAX_VIRT_CPUS            (GDT_LDT_MBYTES << (20-GDT_LDT_VCPU_VA_SHIFT))
-#endif
 #define GDT_LDT_VIRT_START       PERDOMAIN_VIRT_START
 #define GDT_LDT_VIRT_END         (GDT_LDT_VIRT_START + (GDT_LDT_MBYTES << 20))
diff -r d9c7b82aa7b1 -r 431bc26eda5b xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h	Thu Jan 17 10:55:00 2013 +0100
+++ b/xen/include/asm-x86/mm.h	Thu Jan 17 10:56:34 2013 +0100
@@ -265,7 +265,6 @@ struct spage_info
     ((_p)->v.inuse._domain = (_d) ? virt_to_pdx(_d) : 0)
 
 #define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))
-#define vaddr_get_owner(va)   (page_get_owner(virt_to_page((va))))
 
 #define XENSHARE_writable 0
 #define XENSHARE_readonly 1
diff -r d9c7b82aa7b1 -r 431bc26eda5b xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h	Thu Jan 17 10:55:00 2013 +0100
+++ b/xen/include/asm-x86/page.h	Thu Jan 17 10:56:34 2013 +0100
@@ -261,18 +261,6 @@ void copy_page_sse2(void *, const void *
 
 #endif /* !defined(__ASSEMBLY__) */
 
-/* High table entries are reserved by the hypervisor. */
-#define DOMAIN_ENTRIES_PER_L2_PAGETABLE     0
-#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE 0
-
-#define DOMAIN_ENTRIES_PER_L4_PAGETABLE     \
-    (l4_table_offset(HYPERVISOR_VIRT_START))
-#define GUEST_ENTRIES_PER_L4_PAGETABLE      \
-    (l4_table_offset(HYPERVISOR_VIRT_END))
-#define HYPERVISOR_ENTRIES_PER_L4_PAGETABLE \
-    (L4_PAGETABLE_ENTRIES - GUEST_ENTRIES_PER_L4_PAGETABLE  \
-     + DOMAIN_ENTRIES_PER_L4_PAGETABLE)
-
 /* Where to find each level of the linear mapping */
 #define __linear_l1_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
 #define __linear_l2_table \
diff -r d9c7b82aa7b1 -r 431bc26eda5b xen/xsm/xsm_policy.c
--- a/xen/xsm/xsm_policy.c	Thu Jan 17 10:55:00 2013 +0100
+++ b/xen/xsm/xsm_policy.c	Thu Jan 17 10:56:34 2013 +0100
@@ -25,8 +25,9 @@
 char *__initdata policy_buffer = NULL;
 u32 __initdata policy_size = 0;
 
-int xsm_policy_init(unsigned long *module_map, const multiboot_info_t *mbi,
-                    void *(*bootstrap_map)(const module_t *))
+int __init xsm_policy_init(unsigned long *module_map,
+                           const multiboot_info_t *mbi,
+                           void *(*bootstrap_map)(const module_t *))
 {
     int i;
     module_t *mod = (module_t *)__va(mbi->mods_addr);
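On the last change: __init matters because Xen, like Linux, collects
everything so annotated into a dedicated section and releases that
memory once boot completes, so a function called exactly once at boot
should not stay resident. A sketch of the underlying mechanism -- the
macro below models what such an annotation does (Xen's real definition
lives in its own headers and may differ), and nothing here actually
reclaims the section; in the hypervisor that happens at end of boot:

#include <stdio.h>

/* Place the function's text in .init.text via a GCC/Clang attribute. */
#define __init __attribute__((__section__(".init.text")))

/* Hypothetical stand-in for a boot-time-only function. */
static int __init policy_init_model(void)
{
    puts("runs once at boot; its text can be discarded afterwards");
    return 0;
}

int main(void)
{
    return policy_init_model();
}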