[Xen-changelog] merge?
# HG changeset patch
# User cl349@xxxxxxxxxxxxxxxxxxxx
# Node ID 1fc6473ecc01534798a94ada161878dfa3330e2a
# Parent  9225c3f597db755f448429a270200c0d2c7a5a78
# Parent  551870a55f240791695d30fd7fa92a1bf4e48387
merge?

diff -r 9225c3f597db -r 1fc6473ecc01 linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c     Tue Aug 30 20:02:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c     Tue Aug 30 20:03:51 2005
@@ -44,13 +44,6 @@
 #include <asm-xen/hypervisor.h>
 #include <asm-xen/evtchn.h>
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-EXPORT_SYMBOL(force_evtchn_callback);
-EXPORT_SYMBOL(evtchn_do_upcall);
-EXPORT_SYMBOL(bind_evtchn_to_irq);
-EXPORT_SYMBOL(unbind_evtchn_from_irq);
-#endif
-
 /*
  * This lock protects updates to the following mapping and reference-count
  * arrays. The lock does not need to be acquired to read the mapping tables.
@@ -133,6 +126,7 @@
 {
        (void)HYPERVISOR_xen_version(0);
 }
+EXPORT_SYMBOL(force_evtchn_callback);
 
 /* NB. Interrupts are disabled on entry. */
 asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
@@ -165,6 +159,7 @@
                }
        }
 }
+EXPORT_SYMBOL(evtchn_do_upcall);
 
 static int find_unbound_irq(void)
 {
@@ -211,6 +206,7 @@
 
        return irq;
 }
+EXPORT_SYMBOL(bind_virq_to_irq);
 
 void unbind_virq_from_irq(int virq)
 {
@@ -244,6 +240,7 @@
 
        spin_unlock(&irq_mapping_update_lock);
 }
+EXPORT_SYMBOL(unbind_virq_from_irq);
 
 int bind_ipi_to_irq(int ipi)
 {
@@ -279,6 +276,7 @@
 
        return irq;
 }
+EXPORT_SYMBOL(bind_ipi_to_irq);
 
 void unbind_ipi_from_irq(int ipi)
 {
@@ -306,6 +304,7 @@
 
        spin_unlock(&irq_mapping_update_lock);
 }
+EXPORT_SYMBOL(unbind_ipi_from_irq);
 
 int bind_evtchn_to_irq(unsigned int evtchn)
 {
@@ -326,6 +325,7 @@
 
        return irq;
 }
+EXPORT_SYMBOL(bind_evtchn_to_irq);
 
 void unbind_evtchn_from_irq(unsigned int evtchn)
 {
@@ -341,6 +341,7 @@
 
        spin_unlock(&irq_mapping_update_lock);
 }
+EXPORT_SYMBOL(unbind_evtchn_from_irq);
 
 int bind_evtchn_to_irqhandler(
        unsigned int evtchn,
@@ -359,6 +360,7 @@
 
        return retval;
 }
+EXPORT_SYMBOL(bind_evtchn_to_irqhandler);
 
 void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id)
 {
@@ -366,6 +368,7 @@
        free_irq(irq, dev_id);
        unbind_evtchn_from_irq(evtchn);
 }
+EXPORT_SYMBOL(unbind_evtchn_from_irqhandler);
 
 #ifdef CONFIG_SMP
 static void do_nothing_function(void *ign)
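The hunks above move each EXPORT_SYMBOL() from a version-guarded block at the
top of evtchn.c to the line immediately after the function it exports, the
usual kernel convention. A minimal sketch of a loadable module consuming the
exported event-channel helpers follows; only the first parameter of
bind_evtchn_to_irqhandler() is visible in the hunk, so the handler type,
flags, and channel number here are assumptions in the style of request_irq(),
not confirmed by this patch.

/* Hypothetical module: bind a handler to event channel 7, unbind on exit. */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm-xen/evtchn.h>

#define EXAMPLE_EVTCHN 7        /* hypothetical event-channel number */

static irqreturn_t example_handler(int irq, void *dev_id, struct pt_regs *regs)
{
        /* An event arrived on our channel: acknowledge and do work here. */
        return IRQ_HANDLED;
}

static int __init example_init(void)
{
        /* Allocates an IRQ, binds it to the event channel, and installs the
         * handler; callable from a module now that the symbol is exported. */
        return bind_evtchn_to_irqhandler(EXAMPLE_EVTCHN, example_handler,
                                         0, "example", NULL);
}

static void __exit example_exit(void)
{
        unbind_evtchn_from_irqhandler(EXAMPLE_EVTCHN, NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");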
diff -r 9225c3f597db -r 1fc6473ecc01 linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c   Tue Aug 30 20:02:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c   Tue Aug 30 20:03:51 2005
@@ -149,7 +149,7 @@
        pmd_t *pmd;
        pte_t *pte;
 
-       pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
+       pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
        pgd += pgd_index(address);
 
        printk("PGD %lx ", pgd_val(*pgd));
@@ -296,9 +296,9 @@
 #define MEM_VERBOSE 1
 
 #ifdef MEM_VERBOSE
-#define MEM_LOG(_f, _a...)                     \
-       printk("fault.c:[%d]-> " _f "\n",       \
-              __LINE__ , ## _a )
+#define MEM_LOG(_f, _a...)                     \
+       printk("fault.c:[%d]-> " _f "\n",       \
+              __LINE__ , ## _a )
 #else
 #define MEM_LOG(_f, _a...) ((void)0)
 #endif
@@ -325,7 +325,7 @@
        siginfo_t info;
 
        if (!user_mode(regs))
-               error_code &= ~4;       /* means kernel */
+               error_code &= ~4;       /* means kernel */
 
 #ifdef CONFIG_CHECKING
        {
diff -r 9225c3f597db -r 1fc6473ecc01 linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c    Tue Aug 30 20:02:59 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c    Tue Aug 30 20:03:51 2005
@@ -62,14 +62,16 @@
  * avaialble in init_memory_mapping().
  */
-#define addr_to_page(addr, page)       \
-       (addr) &= PHYSICAL_PAGE_MASK;   \
-       (page) = ((unsigned long *) ((unsigned long)(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + __START_KERNEL_map)))
+#define addr_to_page(addr, page)       \
+       (addr) &= PHYSICAL_PAGE_MASK;   \
+       (page) = ((unsigned long *) ((unsigned long) \
+       (((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + \
+       __START_KERNEL_map)))
 
 static void __make_page_readonly(unsigned long va)
 {
-       unsigned long addr;
-       pte_t pte, *ptep;
+       unsigned long addr;
+       pte_t pte, *ptep;
        unsigned long *page = (unsigned long *) init_level4_pgt;
 
        addr = (unsigned long) page[pgd_index(va)];
@@ -89,22 +91,22 @@
 
 static void __make_page_writable(unsigned long va)
 {
-       unsigned long addr;
-       pte_t pte, *ptep;
-       unsigned long *page = (unsigned long *) init_level4_pgt;
-
-       addr = (unsigned long) page[pgd_index(va)];
-       addr_to_page(addr, page);
-
-       addr = page[pud_index(va)];
-       addr_to_page(addr, page);
-
-       addr = page[pmd_index(va)];
-       addr_to_page(addr, page);
-
-       ptep = (pte_t *) &page[pte_index(va)];
+       unsigned long addr;
+       pte_t pte, *ptep;
+       unsigned long *page = (unsigned long *) init_level4_pgt;
+
+       addr = (unsigned long) page[pgd_index(va)];
+       addr_to_page(addr, page);
+
+       addr = page[pud_index(va)];
+       addr_to_page(addr, page);
+
+       addr = page[pmd_index(va)];
+       addr_to_page(addr, page);
+
+       ptep = (pte_t *) &page[pte_index(va)];
        pte.pte = (ptep->pte | _PAGE_RW);
-       xen_l1_entry_update(ptep, pte);
+       xen_l1_entry_update(ptep, pte);
        __flush_tlb_one(addr);
 }
 
@@ -115,55 +117,55 @@
 void make_page_readonly(void *va)
 {
        pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t pte, *ptep;
-       unsigned long addr = (unsigned long) va;
-
-       if (!init_mapping_done) {
-               __make_page_readonly(addr);
-               return;
-       }
-
-       pgd = pgd_offset_k(addr);
-       pud = pud_offset(pgd, addr);
-       pmd = pmd_offset(pud, addr);
-       ptep = pte_offset_kernel(pmd, addr);
+       unsigned long addr = (unsigned long) va;
+
+       if (!init_mapping_done) {
+               __make_page_readonly(addr);
+               return;
+       }
+
+       pgd = pgd_offset_k(addr);
+       pud = pud_offset(pgd, addr);
+       pmd = pmd_offset(pud, addr);
+       ptep = pte_offset_kernel(pmd, addr);
        pte.pte = (ptep->pte & ~_PAGE_RW);
-       xen_l1_entry_update(ptep, pte);
+       xen_l1_entry_update(ptep, pte);
        __flush_tlb_one(addr);
 }
 
 void make_page_writable(void *va)
 {
-       pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t pte, *ptep;
-       unsigned long addr = (unsigned long) va;
-
-       if (!init_mapping_done) {
-               __make_page_writable(addr);
-               return;
-       }
-
-       pgd = pgd_offset_k(addr);
-       pud = pud_offset(pgd, addr);
-       pmd = pmd_offset(pud, addr);
-       ptep = pte_offset_kernel(pmd, addr);
+       pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t pte, *ptep;
+       unsigned long addr = (unsigned long) va;
+
+       if (!init_mapping_done) {
+               __make_page_writable(addr);
+               return;
+       }
+
+       pgd = pgd_offset_k(addr);
+       pud = pud_offset(pgd, addr);
+       pmd = pmd_offset(pud, addr);
+       ptep = pte_offset_kernel(pmd, addr);
        pte.pte = (ptep->pte | _PAGE_RW);
-       xen_l1_entry_update(ptep, pte);
+       xen_l1_entry_update(ptep, pte);
        __flush_tlb_one(addr);
 }
 
 void make_pages_readonly(void* va, unsigned nr)
 {
-       while ( nr-- != 0 ) {
-               make_page_readonly(va);
-               va = (void*)((unsigned long)va + PAGE_SIZE);
-       }
+       while (nr-- != 0) {
+               make_page_readonly(va);
+               va = (void*)((unsigned long)va + PAGE_SIZE);
+       }
 }
 
 void make_pages_writable(void* va, unsigned nr)
 {
-       while ( nr-- != 0 ) {
-               make_page_writable(va);
-               va = (void*)((unsigned long)va + PAGE_SIZE);
-       }
+       while (nr-- != 0) {
+               make_page_writable(va);
+               va = (void*)((unsigned long)va + PAGE_SIZE);
+       }
 }
 
 /*
@@ -389,7 +391,7 @@
        set_pte_phys(address, phys, prot, SET_FIXMAP_USER);
 }
 
-unsigned long __initdata table_start, table_end, tables_space;
+unsigned long __initdata table_start, tables_space;
 
 unsigned long get_machine_pfn(unsigned long addr)
 {
@@ -400,40 +402,15 @@
        return pte_mfn(*pte);
 }
 
-#define ALIGN_TO_4K __attribute__((section(".data.page_aligned")))
-#define MAX_LOW_PAGES 0x20
-static unsigned long __init_pgt[MAX_LOW_PAGES][512] ALIGN_TO_4K;
-static int __init_pgt_index;
-
-/*
- * We start using from start_pfn
- */
 static __init void *alloc_static_page(unsigned long *phys)
 {
-       int i = __init_pgt_index++;
-
-       if (__init_pgt_index >= MAX_LOW_PAGES) {
-               printk("Need to increase MAX_LOW_PAGES");
-               BUG();
-       }
-
-       *phys = __pa(__init_pgt[i]);
-
-       return (void *) __init_pgt[i];
+       unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
+       *phys = start_pfn << PAGE_SHIFT;
+       start_pfn++;
+       memset((void *)va, 0, PAGE_SIZE);
+       return (void *)va;
 }
 
-/*
- * Get RO page
- */
-static void __init *alloc_low_page(unsigned long *phys)
-{
-       unsigned long pfn = table_end++;
-
-       *phys = (pfn << PAGE_SHIFT);
-       memset((void *) ((pfn << PAGE_SHIFT) + __START_KERNEL_map), 0, PAGE_SIZE);
-       return (void *)((pfn << PAGE_SHIFT) + __START_KERNEL_map);
-}
-
 #define PTE_SIZE PAGE_SIZE
 
 static inline void __set_pte(pte_t *dst, pte_t val)
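The rewritten alloc_static_page() above merges the two old allocators into a
single bump allocator: every early page-table page now comes straight off the
physical map at start_pfn, which only ever advances. A standalone, userspace
illustration of the pattern (all names here are hypothetical stand-ins):

/* Bump allocation: carve page-sized chunks off a region by advancing a
 * single cursor; pages come back zeroed and are never freed. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
static unsigned char pool[16 * PAGE_SIZE];   /* stand-in for early RAM  */
static unsigned long next_pfn;               /* stand-in for start_pfn  */

static void *alloc_page_bump(unsigned long *phys)
{
        void *va;

        if (next_pfn >= sizeof(pool) / PAGE_SIZE)
                return NULL;            /* pool exhausted             */
        va = pool + (next_pfn * PAGE_SIZE);
        *phys = next_pfn * PAGE_SIZE;   /* "physical" offset of page  */
        next_pfn++;                     /* cursor only moves forward  */
        memset(va, 0, PAGE_SIZE);       /* pages are handed out zeroed */
        return va;
}

int main(void)
{
        unsigned long phys;
        void *p = alloc_page_bump(&phys);
        printf("page at %p, phys offset %lu\n", p, phys);
        return 0;
}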
@@ -443,27 +420,21 @@
 
 static inline int make_readonly(unsigned long paddr)
 {
-       int readonly = 0;
-
-       /* Make new page tables read-only. */
-       if ((paddr < ((table_start << PAGE_SHIFT) + tables_space)) &&
-           (paddr >= (table_start << PAGE_SHIFT)))
-               readonly = 1;
-
-       /* Make old page tables read-only. */
-       if ((paddr < ((xen_start_info.pt_base - __START_KERNEL_map) +
-           (xen_start_info.nr_pt_frames << PAGE_SHIFT))) &&
-           (paddr >= (xen_start_info.pt_base - __START_KERNEL_map)))
-               readonly = 1;
-
-       /*
-        * No need for writable mapping of kernel image. This also ensures that
-        * page and descriptor tables embedded inside don't have writable mappings.
-        */
-       if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end)))
-               readonly = 1;
-
-       return readonly;
+       int readonly = 0;
+
+       /* Make old and new page tables read-only. */
+       if ((paddr >= (xen_start_info.pt_base - __START_KERNEL_map))
+           && (paddr < ((table_start << PAGE_SHIFT) + tables_space)))
+               readonly = 1;
+       /*
+        * No need for writable mapping of kernel image. This also ensures that
+        * page and descriptor tables embedded inside don't have writable
+        * mappings.
+        */
+       if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end)))
+               readonly = 1;
+
+       return readonly;
 }
 
 static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
@@ -485,7 +456,7 @@
                        break;
                }
 
-               pmd = alloc_low_page(&pmd_phys);
+               pmd = alloc_static_page(&pmd_phys);
                make_page_readonly(pmd);
                xen_pmd_pin(pmd_phys);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
@@ -499,7 +470,7 @@
                                set_pmd(pmd, __pmd(0));
                                break;
                        }
-                       pte = alloc_low_page(&pte_phys);
+                       pte = alloc_static_page(&pte_phys);
                        pte_save = pte;
                        for (k = 0; k < PTRS_PER_PTE; pte++, k++, paddr += PTE_SIZE) {
                                if ((paddr >= end) ||
@@ -526,15 +497,16 @@
 
 static void __init find_early_table_space(unsigned long end)
 {
-       unsigned long puds, pmds, ptes;
+       unsigned long puds, pmds, ptes;
 
        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-       ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
-
-       tables_space = round_up(puds * 8, PAGE_SIZE) +
-               round_up(pmds * 8, PAGE_SIZE) +
-               round_up(ptes * 8, PAGE_SIZE);
+       ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
+
+       tables_space =
+               round_up(puds * 8, PAGE_SIZE) +
+               round_up(pmds * 8, PAGE_SIZE) +
+               round_up(ptes * 8, PAGE_SIZE);
 }
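A quick sanity check of the tables_space arithmetic above: with 4 KiB pages
and 8-byte entries, mapping 4 GiB needs 4 PUD entries, 2048 PMD entries, and
1048576 PTEs. A self-contained version of the computation (the 4 GiB figure
is just an example, not taken from this patch):

/* Back-of-envelope check of the table_space estimate, assuming 4 KiB pages,
 * 8-byte entries, and end = 4 GiB. */
#include <stdio.h>

#define PAGE_SIZE (1UL << 12)
#define PUD_SIZE  (1UL << 30)   /* 1 GiB covered per PUD entry */
#define PMD_SIZE  (1UL << 21)   /* 2 MiB covered per PMD entry */

static unsigned long round_up(unsigned long x, unsigned long a)
{
        return (x + a - 1) & ~(a - 1);
}

int main(void)
{
        unsigned long end  = 4UL << 30;                    /* 4 GiB */
        unsigned long puds = (end + PUD_SIZE - 1) / PUD_SIZE;
        unsigned long pmds = (end + PMD_SIZE - 1) / PMD_SIZE;
        unsigned long ptes = (end + PAGE_SIZE - 1) / PAGE_SIZE;
        unsigned long space = round_up(puds * 8, PAGE_SIZE) +
                              round_up(pmds * 8, PAGE_SIZE) +
                              round_up(ptes * 8, PAGE_SIZE);
        /* Prints: 4 puds, 2048 pmds, 1048576 ptes -> 8409088 bytes (~8 MiB),
         * dominated by the PTE pages. */
        printf("%lu puds, %lu pmds, %lu ptes -> %lu bytes\n",
               puds, pmds, ptes, space);
        return 0;
}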
 
 void __init xen_init_pt(void)
@@ -580,65 +552,58 @@
                mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
 }
 
-/*
- * Extend kernel mapping to access pages for page tables. The initial
- * mapping done by Xen is minimal (e.g. 8MB) and we need to extend the
- * mapping for early initialization.
- */
-static unsigned long current_size, extended_size;
-
 void __init extend_init_mapping(void)
 {
        unsigned long va = __START_KERNEL_map;
        unsigned long phys, addr, *pte_page;
-       pmd_t *pmd;
+       pmd_t *pmd;
        pte_t *pte, new_pte;
-       unsigned long *page = (unsigned long *) init_level4_pgt;
-       int i;
+       unsigned long *page = (unsigned long *)init_level4_pgt;
 
        addr = page[pgd_index(va)];
        addr_to_page(addr, page);
        addr = page[pud_index(va)];
        addr_to_page(addr, page);
 
-       for (;;) {
+       /* Kill mapping of low 1MB. */
+       while (va < (unsigned long)&_text) {
+               HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
+               va += PAGE_SIZE;
+       }
+
+       /* Ensure init mappings cover kernel text/data and initial tables. */
+       while (va < (__START_KERNEL_map
+                    + (start_pfn << PAGE_SHIFT)
+                    + tables_space)) {
                pmd = (pmd_t *)&page[pmd_index(va)];
-               if (!pmd_present(*pmd))
-                       break;
-               addr = page[pmd_index(va)];
-               addr_to_page(addr, pte_page);
-               for (i = 0; i < PTRS_PER_PTE; i++) {
-                       pte = (pte_t *) &pte_page[pte_index(va)];
-                       if (!pte_present(*pte))
-                               break;
-                       va += PAGE_SIZE;
-                       current_size += PAGE_SIZE;
+               if (pmd_none(*pmd)) {
+                       pte_page = alloc_static_page(&phys);
+                       make_page_readonly(pte_page);
+                       xen_pte_pin(phys);
+                       set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER));
+               } else {
+                       addr = page[pmd_index(va)];
+                       addr_to_page(addr, pte_page);
                }
-       }
-
-       while (va < __START_KERNEL_map + current_size + tables_space) {
-               pmd = (pmd_t *) &page[pmd_index(va)];
-               if (!pmd_none(*pmd))
-                       continue;
-               pte_page = (unsigned long *) alloc_static_page(&phys);
-               make_page_readonly(pte_page);
-               xen_pte_pin(phys);
-               set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER));
-               for (i = 0; i < PTRS_PER_PTE; i++, va += PAGE_SIZE) {
+               pte = (pte_t *)&pte_page[pte_index(va)];
+               if (pte_none(*pte)) {
                        new_pte = pfn_pte(
                                (va - __START_KERNEL_map) >> PAGE_SHIFT,
                                __pgprot(_KERNPG_TABLE | _PAGE_USER));
-                       pte = (pte_t *)&pte_page[pte_index(va)];
                        xen_l1_entry_update(pte, new_pte);
-                       extended_size += PAGE_SIZE;
                }
-       }
-
-       /* Kill mapping of low 1MB. */
-       for (va = __START_KERNEL_map; va < (unsigned long)&_text; va += PAGE_SIZE)
+               va += PAGE_SIZE;
+       }
+
+       /* Finally, blow away any spurious initial mappings. */
+       while (1) {
+               pmd = (pmd_t *)&page[pmd_index(va)];
+               if (pmd_none(*pmd))
+                       break;
                HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-}
-
+               va += PAGE_SIZE;
+       }
+}
 
 /* Setup the direct mapping of the physical memory at PAGE_OFFSET.
    This runs before bootmem is initialized and gets pages directly from the
@@ -651,34 +616,31 @@
        find_early_table_space(end);
        extend_init_mapping();
-       start_pfn = current_size >> PAGE_SHIFT;
 
        table_start = start_pfn;
-       table_end = table_start;
 
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);
 
        for (; start < end; start = next) {
                unsigned long pud_phys;
-               pud_t *pud = alloc_low_page(&pud_phys);
-               make_page_readonly(pud);
-               xen_pud_pin(pud_phys);
+               pud_t *pud = alloc_static_page(&pud_phys);
+               make_page_readonly(pud);
+               xen_pud_pin(pud_phys);
                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
-       }
-
-       printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
-              table_start<<PAGE_SHIFT,
-              table_end<<PAGE_SHIFT);
-
-       start_pfn = ((current_size + extended_size) >> PAGE_SHIFT);
+       }
+
+       printk("kernel direct mapping tables upto %lx @ %lx-%lx\n",
+              __pa(end), table_start<<PAGE_SHIFT, start_pfn<<PAGE_SHIFT);
+
+       BUG_ON(start_pfn != (table_start + (tables_space >> PAGE_SHIFT)));
 
        __flush_tlb_all();
-       init_mapping_done = 1;
+       init_mapping_done = 1;
 }
 
 extern struct x8664_pda cpu_pda[NR_CPUS];
@@ -1003,3 +965,13 @@
 {
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
 }
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff -r 9225c3f597db -r 1fc6473ecc01 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c       Tue Aug 30 20:02:59 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c       Tue Aug 30 20:03:51 2005
@@ -45,7 +45,9 @@
 static char printf_buffer[4096];
 
 static LIST_HEAD(watches);
+
 DECLARE_MUTEX(xenbus_lock);
+EXPORT_SYMBOL(xenbus_lock);
 
 static int get_error(const char *errorstring)
 {
@@ -224,6 +226,7 @@
        ret[(*num)++] = p;
        return ret;
 }
+EXPORT_SYMBOL(xenbus_directory);
 
 /* Check if a path exists. Return 1 if it does. */
 int xenbus_exists(const char *dir, const char *node)
@@ -237,6 +240,7 @@
        kfree(d);
        return 1;
 }
+EXPORT_SYMBOL(xenbus_exists);
 
 /* Get the value of a single file.
  * Returns a kmalloced value: call free() on it after use.
@@ -277,18 +281,21 @@
 
        return xs_error(xs_talkv(XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
 }
+EXPORT_SYMBOL(xenbus_write);
 
 /* Create a new directory. */
 int xenbus_mkdir(const char *dir, const char *node)
 {
        return xs_error(xs_single(XS_MKDIR, join(dir, node), NULL));
 }
+EXPORT_SYMBOL(xenbus_mkdir);
 
 /* Destroy a file or directory (directories must be empty). */
 int xenbus_rm(const char *dir, const char *node)
 {
        return xs_error(xs_single(XS_RM, join(dir, node), NULL));
 }
+EXPORT_SYMBOL(xenbus_rm);
 
 /* Start a transaction: changes by others will not be seen during this
  * transaction, and changes will not be visible to others until end.
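The xenbus hunks above export the core xenstore accessors plus the
xenbus_lock semaphore that serializes them. A sketch of how an out-of-tree
driver might use the now-exported symbols; the path probed is hypothetical,
and the convention of holding xenbus_lock around calls is inferred from the
DECLARE_MUTEX() in the hunk, not spelled out by this patch.

/* Hypothetical probe: check for a xenstore node, create it if absent. */
#include <asm/semaphore.h>

extern struct semaphore xenbus_lock;
extern int xenbus_exists(const char *dir, const char *node);
extern int xenbus_mkdir(const char *dir, const char *node);

static int example_probe_backend(void)
{
        int err = 0;

        down(&xenbus_lock);             /* serialize xenstore access */
        if (!xenbus_exists("device/example", "state"))  /* path hypothetical */
                err = xenbus_mkdir("device", "example");
        up(&xenbus_lock);

        return err;
}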
diff -r 9225c3f597db -r 1fc6473ecc01 tools/misc/cpuperf/cpuperf.c
--- a/tools/misc/cpuperf/cpuperf.c      Tue Aug 30 20:02:59 2005
+++ b/tools/misc/cpuperf/cpuperf.c      Tue Aug 30 20:03:51 2005
@@ -243,16 +243,12 @@
     }
 
     if (read) {
-        while((cpu_mask&1)) {
-            int i;
-            for (i=0x300;i<0x312;i++) {
-                printf("%010llu ",cpus_rdmsr( cpu_mask, i ) );
-            }
-            printf("\n");
-            cpu_mask>>=1;
-        }
+        int i;
+        for (i=0x300;i<0x312;i++)
+            printf("%010llu ",cpus_rdmsr( cpu_mask, i ) );
+        printf("\n");
         exit(1);
-    }
+    }
 
     if (!escr) {
         fprintf(stderr, "Need an ESCR.\n");
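The simplified loop above dumps MSRs 0x300 through 0x311, the range of the 18
Pentium 4 performance-counter registers, for the CPUs selected by cpu_mask.
For comparison, a native-Linux sketch of reading the same range through the
msr driver rather than Xen's interface; it assumes /dev/cpu/0/msr exists (msr
module loaded) and that the caller has permission to read it.

/* Read P4 performance counters 0x300..0x311 on CPU 0 via the msr driver. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/dev/cpu/0/msr", O_RDONLY);
    uint32_t msr;
    uint64_t val;

    if (fd < 0) {
        perror("open /dev/cpu/0/msr");
        return 1;
    }
    for (msr = 0x300; msr < 0x312; msr++) {
        /* pread() at offset <msr number> returns the 64-bit MSR value. */
        if (pread(fd, &val, sizeof(val), msr) == sizeof(val))
            printf("%010llu ", (unsigned long long)val);
    }
    printf("\n");
    close(fd);
    return 0;
}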
diff -r 9225c3f597db -r 1fc6473ecc01 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue Aug 30 20:02:59 2005
+++ b/xen/arch/x86/domain.c     Tue Aug 30 20:03:51 2005
@@ -255,13 +255,13 @@
         v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
         v->cpumap = CPUMAP_RUNANYWHERE;
         SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
-        machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
-                                PAGE_SHIFT] = INVALID_M2P_ENTRY;
+        set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT,
+                         INVALID_M2P_ENTRY);
 
         d->arch.mm_perdomain_pt = alloc_xenheap_page();
         memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
-        machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >>
-                                PAGE_SHIFT] = INVALID_M2P_ENTRY;
+        set_pfn_from_mfn(virt_to_phys(d->arch.mm_perdomain_pt) >> PAGE_SHIFT,
+                         INVALID_M2P_ENTRY);
         v->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
         v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
             l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
diff -r 9225c3f597db -r 1fc6473ecc01 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Tue Aug 30 20:02:59 2005
+++ b/xen/arch/x86/domain_build.c       Tue Aug 30 20:03:51 2005
@@ -592,8 +592,7 @@
     if ( opt_dom0_translate )
     {
         si->shared_info  = d->next_io_page << PAGE_SHIFT;
-        set_machinetophys(virt_to_phys(d->shared_info) >> PAGE_SHIFT,
-                          d->next_io_page);
+        set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT, d->next_io_page);
         d->next_io_page++;
     }
     else
@@ -614,7 +613,7 @@
         mfn = alloc_epfn - (pfn - REVERSE_START);
 #endif
         ((u32 *)vphysmap_start)[pfn] = mfn;
-        machine_to_phys_mapping[mfn] = pfn;
+        set_pfn_from_mfn(mfn, pfn);
     }
     while ( pfn < nr_pages )
     {
@@ -627,7 +626,7 @@
 #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
 #endif
             ((u32 *)vphysmap_start)[pfn] = mfn;
-            machine_to_phys_mapping[mfn] = pfn;
+            set_pfn_from_mfn(mfn, pfn);
 #undef pfn
             page++; pfn++;
     }
diff -r 9225c3f597db -r 1fc6473ecc01 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Aug 30 20:02:59 2005
+++ b/xen/arch/x86/mm.c Tue Aug 30 20:03:51 2005
@@ -1452,7 +1452,7 @@
                     "!= exp %" PRtype_info ") "
                     "for mfn %lx (pfn %x)",
                     x, type, page_to_pfn(page),
-                    machine_to_phys_mapping[page_to_pfn(page)]);
+                    get_pfn_from_mfn(page_to_pfn(page)));
                 return 0;
             }
             else if ( (x & PGT_va_mask) == PGT_va_mutable )
@@ -2206,7 +2206,7 @@
             printk("privileged guest dom%d requests pfn=%lx to "
                    "map mfn=%lx for dom%d\n",
                    d->domain_id, gpfn, mfn, FOREIGNDOM->domain_id);
-            set_machinetophys(mfn, gpfn);
+            set_pfn_from_mfn(mfn, gpfn);
             set_p2m_entry(FOREIGNDOM, gpfn, mfn, &sh_mapcache, &mapcache);
             okay = 1;
             shadow_unlock(FOREIGNDOM);
@@ -2225,7 +2225,7 @@
             break;
         }
 
-        set_machinetophys(mfn, gpfn);
+        set_pfn_from_mfn(mfn, gpfn);
         okay = 1;
 
         /*
diff -r 9225c3f597db -r 1fc6473ecc01 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Tue Aug 30 20:02:59 2005
+++ b/xen/arch/x86/shadow32.c   Tue Aug 30 20:03:51 2005
@@ -827,7 +827,7 @@
     {
         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
-        pfn = machine_to_phys_mapping[mfn];
+        pfn = get_pfn_from_mfn(mfn);
         ASSERT(pfn != INVALID_M2P_ENTRY);
         ASSERT(pfn < (1u<<20));
 
@@ -841,7 +841,7 @@
     {
         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
-        pfn = machine_to_phys_mapping[mfn];
+        pfn = get_pfn_from_mfn(mfn);
 
         if ( (pfn != INVALID_M2P_ENTRY) &&
              (pfn < (1u<<20)) )
diff -r 9225c3f597db -r 1fc6473ecc01 xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c      Tue Aug 30 20:02:59 2005
+++ b/xen/arch/x86/shadow_public.c      Tue Aug 30 20:03:51 2005
@@ -1311,7 +1311,7 @@
     {
         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
-        pfn = machine_to_phys_mapping[mfn];
+        pfn = get_pfn_from_mfn(mfn);
         ASSERT(pfn != INVALID_M2P_ENTRY);
         ASSERT(pfn < (1u<<20));
 
@@ -1325,7 +1325,7 @@
     {
         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
-        pfn = machine_to_phys_mapping[mfn];
+        pfn = get_pfn_from_mfn(mfn);
 
         if ( (pfn != INVALID_M2P_ENTRY) &&
              (pfn < (1u<<20)) )
diff -r 9225c3f597db -r 1fc6473ecc01 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Tue Aug 30 20:02:59 2005
+++ b/xen/arch/x86/traps.c      Tue Aug 30 20:03:51 2005
@@ -100,6 +100,7 @@
 
 static int debug_stack_lines = 20;
 integer_param("debug_stack_lines", debug_stack_lines);
+#define stack_words_per_line (32 / BYTES_PER_LONG)
 
 int is_kernel_text(unsigned long addr)
 {
@@ -125,7 +126,7 @@
 
     printk("Guest stack trace from "__OP"sp=%p:\n   ", stack);
 
-    for ( i = 0; i < (debug_stack_lines*8); i++ )
+    for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ )
     {
         if ( ((long)stack & (STACK_SIZE-1)) == 0 )
             break;
@@ -137,7 +138,7 @@
             i = 1;
             break;
         }
-        if ( (i != 0) && ((i % 8) == 0) )
+        if ( (i != 0) && ((i % stack_words_per_line) == 0) )
             printk("\n    ");
         printk("%p ", _p(addr));
         stack++;
@@ -176,11 +177,11 @@
 
     printk("Xen stack trace from "__OP"sp=%p:\n   ", stack);
 
-    for ( i = 0; i < (debug_stack_lines*8); i++ )
+    for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ )
    {
         if ( ((long)stack & (STACK_SIZE-1)) == 0 )
             break;
-        if ( (i != 0) && ((i % 8) == 0) )
+        if ( (i != 0) && ((i % stack_words_per_line) == 0) )
             printk("\n    ");
         addr = *stack++;
         printk("%p ", _p(addr));
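The new stack_words_per_line constant above replaces a hard-coded 8 words per
output line, which kept 32-bit builds at 32 bytes per line but let 64-bit
builds sprawl to 64. The arithmetic, runnable standalone (sizeof(long) stands
in for Xen's BYTES_PER_LONG constant):

/* Keep each stack-dump line at 32 bytes of stack, whatever the word size. */
#include <stdio.h>

#define BYTES_PER_LONG sizeof(long)
#define stack_words_per_line (32 / BYTES_PER_LONG)

int main(void)
{
    /* 32-bit build: 32/4 = 8 words per line; 64-bit build: 32/8 = 4. */
    printf("%zu words per line\n", stack_words_per_line);
    return 0;
}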
diff -r 9225c3f597db -r 1fc6473ecc01 xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        Tue Aug 30 20:02:59 2005
+++ b/xen/arch/x86/vmx.c        Tue Aug 30 20:03:51 2005
@@ -694,7 +694,7 @@
         return 0;
     }
 
-    mfn = phys_to_machine_mapping(laddr >> PAGE_SHIFT);
+    mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT);
     addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK);
 
     if (dir == COPY_IN)
@@ -795,7 +795,7 @@
          * removed some translation or changed page attributes.
          * We simply invalidate the shadow.
          */
-        mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
         if (mfn != pagetable_get_pfn(d->arch.guest_table)) {
             printk("Invalid CR3 value=%x", c->cr3);
             domain_crash_synchronous();
@@ -813,7 +813,7 @@
             domain_crash_synchronous();
             return 0;
         }
-        mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
         d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
         update_pagetables(d);
         /*
@@ -968,7 +968,7 @@
         /*
          * The guest CR3 must be pointing to the guest physical.
          */
-        if ( !VALID_MFN(mfn = phys_to_machine_mapping(
+        if ( !VALID_MFN(mfn = get_mfn_from_pfn(
                             d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
              !get_page(pfn_to_page(mfn), d->domain) )
         {
@@ -1164,7 +1164,7 @@
              * removed some translation or changed page attributes.
              * We simply invalidate the shadow.
              */
-            mfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
+            mfn = get_mfn_from_pfn(value >> PAGE_SHIFT);
             if (mfn != pagetable_get_pfn(d->arch.guest_table))
                 __vmx_bug(regs);
             shadow_sync_all(d->domain);
@@ -1175,7 +1175,7 @@
              */
             VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
             if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) ||
-                 !VALID_MFN(mfn = phys_to_machine_mapping(value >> PAGE_SHIFT)) ||
+                 !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) ||
                  !get_page(pfn_to_page(mfn), d->domain) )
             {
                 printk("Invalid CR3 value=%lx", value);
diff -r 9225c3f597db -r 1fc6473ecc01 xen/arch/x86/vmx_platform.c
--- a/xen/arch/x86/vmx_platform.c       Tue Aug 30 20:02:59 2005
+++ b/xen/arch/x86/vmx_platform.c       Tue Aug 30 20:03:51 2005
@@ -521,7 +521,7 @@
 
     if ( vmx_paging_enabled(current) )
     {
         gpa = gva_to_gpa(guest_eip);
-        mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
 
         /* Does this cross a page boundary ? */
         if ( (guest_eip & PAGE_MASK) != ((guest_eip + inst_len) & PAGE_MASK) )
@@ -532,7 +532,7 @@
     }
     else
     {
-        mfn = phys_to_machine_mapping(guest_eip >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(guest_eip >> PAGE_SHIFT);
     }
 
     inst_start = map_domain_page(mfn);
@@ -542,7 +542,7 @@
     if ( remaining )
     {
         gpa = gva_to_gpa(guest_eip+inst_len+remaining);
-        mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
 
         inst_start = map_domain_page(mfn);
         memcpy((char *)buf+inst_len, inst_start, remaining);
diff -r 9225c3f597db -r 1fc6473ecc01 xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c   Tue Aug 30 20:02:59 2005
+++ b/xen/arch/x86/vmx_vmcs.c   Tue Aug 30 20:03:51 2005
@@ -148,7 +148,7 @@
     offset = (addr & ~PAGE_MASK);
     addr = round_pgdown(addr);
 
-    mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
+    mpfn = get_mfn_from_pfn(addr >> PAGE_SHIFT);
     p = map_domain_page(mpfn);
 
     e820p = (struct e820entry *) ((unsigned long) p + offset);
@@ -175,7 +175,7 @@
     unmap_domain_page(p);
 
     /* Initialise shared page */
-    mpfn = phys_to_machine_mapping(gpfn);
+    mpfn = get_mfn_from_pfn(gpfn);
     p = map_domain_page(mpfn);
     d->domain->arch.vmx_platform.shared_page_va = (unsigned long)p;
 
diff -r 9225c3f597db -r 1fc6473ecc01 xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S       Tue Aug 30 20:02:59 2005
+++ b/xen/arch/x86/x86_64/entry.S       Tue Aug 30 20:03:51 2005
@@ -339,7 +339,8 @@
 1:      /* In kernel context already: push new frame at existing %rsp. */
         movq  UREGS_rsp+8(%rsp),%rsi
         andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
-2:      movq  $HYPERVISOR_VIRT_START,%rax
+2:      andq  $~0xf,%rsi                # Stack frames are 16-byte aligned.
+        movq  $HYPERVISOR_VIRT_START,%rax
         cmpq  %rax,%rsi
         jb    1f                        # In +ve address space? Then okay.
         movq  $HYPERVISOR_VIRT_END+60,%rax
diff -r 9225c3f597db -r 1fc6473ecc01 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Tue Aug 30 20:02:59 2005
+++ b/xen/arch/x86/x86_64/traps.c       Tue Aug 30 20:03:51 2005
@@ -15,19 +15,22 @@
 
 void show_registers(struct cpu_user_regs *regs)
 {
-    printk("CPU: %d\nEIP: %04x:[<%016lx>]",
+    printk("CPU: %d\nRIP: %04x:[<%016lx>]",
            smp_processor_id(), 0xffff & regs->cs, regs->rip);
     if ( !GUEST_MODE(regs) )
         print_symbol(" %s", regs->rip);
-    printk("\nEFLAGS: %016lx\n", regs->eflags);
-    printk("rax: %016lx   rbx: %016lx   rcx: %016lx   rdx: %016lx\n",
-           regs->rax, regs->rbx, regs->rcx, regs->rdx);
-    printk("rsi: %016lx   rdi: %016lx   rbp: %016lx   rsp: %016lx\n",
-           regs->rsi, regs->rdi, regs->rbp, regs->rsp);
-    printk("r8:  %016lx   r9:  %016lx   r10: %016lx   r11: %016lx\n",
-           regs->r8,  regs->r9,  regs->r10, regs->r11);
-    printk("r12: %016lx   r13: %016lx   r14: %016lx   r15: %016lx\n",
-           regs->r12, regs->r13, regs->r14, regs->r15);
+    printk("\nRFLAGS: %016lx\n", regs->eflags);
+    printk("rax: %016lx   rbx: %016lx   rcx: %016lx\n",
+           regs->rax, regs->rbx, regs->rcx);
+    printk("rdx: %016lx   rsi: %016lx   rdi: %016lx\n",
+           regs->rdx, regs->rsi, regs->rdi);
+    printk("rbp: %016lx   rsp: %016lx   r8:  %016lx\n",
+           regs->rbp, regs->rsp, regs->r8);
+    printk("r9:  %016lx   r10: %016lx   r11: %016lx\n",
+           regs->r9, regs->r10, regs->r11);
+    printk("r12: %016lx   r13: %016lx   r14: %016lx\n",
+           regs->r12, regs->r13, regs->r14);
+    printk("r15: %016lx\n", regs->r15);
 
     if ( GUEST_MODE(regs) )
         show_guest_stack();
diff -r 9225c3f597db -r 1fc6473ecc01 xen/common/grant_table.c
--- a/xen/common/grant_table.c  Tue Aug 30 20:02:59 2005
+++ b/xen/common/grant_table.c  Tue Aug 30 20:03:51 2005
@@ -1211,13 +1211,13 @@
         DPRINTK("Bad pfn (%lx)\n", pfn);
     else
     {
-        machine_to_phys_mapping[frame] = pfn;
+        set_pfn_from_mfn(frame, pfn);
 
         if ( unlikely(shadow_mode_log_dirty(ld)))
              mark_dirty(ld, frame);
 
         if (shadow_mode_translate(ld))
-            __phys_to_machine_mapping[pfn] = frame;
+            set_mfn_from_pfn(pfn, frame);
     }
     sha->frame = __mfn_to_gpfn(rd, frame);
     sha->domid = rd->domain_id;
@@ -1268,8 +1268,7 @@
     {
         SHARE_PFN_WITH_DOMAIN(
             virt_to_page((char *)(t->shared)+(i*PAGE_SIZE)), d);
-        machine_to_phys_mapping[(virt_to_phys(t->shared) >> PAGE_SHIFT) + i] =
-            INVALID_M2P_ENTRY;
+        set_pfn_from_mfn((virt_to_phys(t->shared) >> PAGE_SHIFT) + i, INVALID_M2P_ENTRY);
     }
 
     /* Okay, install the structure. */
diff -r 9225c3f597db -r 1fc6473ecc01 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Tue Aug 30 20:02:59 2005
+++ b/xen/include/asm-ia64/mm.h Tue Aug 30 20:03:51 2005
@@ -405,7 +405,7 @@
 /* If pmt table is provided by control pannel later, we need __get_user
  * here. However if it's allocated by HV, we should access it directly
  */
-#define phys_to_machine_mapping(d, gpfn)                \
+#define get_mfn_from_pfn(d, gpfn)                       \
     ((d) == dom0 ? gpfn :                               \
      (gpfn <= d->arch.max_pfn ? (d)->arch.pmt[(gpfn)] : \
       INVALID_MFN))
@@ -414,7 +414,7 @@
     machine_to_phys_mapping[(mfn)]
 
 #define __gpfn_to_mfn(_d, gpfn)                 \
-    phys_to_machine_mapping((_d), (gpfn))
+    get_mfn_from_pfn((_d), (gpfn))
 
 #define __gpfn_invalid(_d, gpfn)                        \
     (__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK)
diff -r 9225c3f597db -r 1fc6473ecc01 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Tue Aug 30 20:02:59 2005
+++ b/xen/include/asm-x86/mm.h  Tue Aug 30 20:03:51 2005
@@ -255,10 +255,13 @@
  * contiguous (or near contiguous) physical memory.
  */
 #undef  machine_to_phys_mapping
-#define machine_to_phys_mapping  ((u32 *)RDWR_MPT_VIRT_START)
+#define machine_to_phys_mapping  ((u32 *)RDWR_MPT_VIRT_START)
 #define INVALID_M2P_ENTRY        (~0U)
 #define VALID_M2P(_e)            (!((_e) & (1U<<31)))
 #define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))
+
+#define set_pfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
+#define get_pfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
 
 /*
  * The phys_to_machine_mapping is the reversed mapping of MPT for full
@@ -266,17 +269,17 @@
  * guests, so we steal the address space that would have normally
  * been used by the read-only MPT map.
  */
-#define __phys_to_machine_mapping ((unsigned long *)RO_MPT_VIRT_START)
-#define INVALID_MFN               (~0UL)
-#define VALID_MFN(_mfn)           (!((_mfn) & (1U<<31)))
-
-/* Returns the machine physical */
-static inline unsigned long phys_to_machine_mapping(unsigned long pfn)
+#define phys_to_machine_mapping ((unsigned long *)RO_MPT_VIRT_START)
+#define INVALID_MFN             (~0UL)
+#define VALID_MFN(_mfn)         (!((_mfn) & (1U<<31)))
+
+#define set_mfn_from_pfn(pfn, mfn) (phys_to_machine_mapping[(pfn)] = (mfn))
+static inline unsigned long get_mfn_from_pfn(unsigned long pfn)
 {
     unsigned long mfn;
     l1_pgentry_t pte;
 
-    if ( (__copy_from_user(&pte, &__phys_to_machine_mapping[pfn],
+    if ( (__copy_from_user(&pte, &phys_to_machine_mapping[pfn],
                            sizeof(pte)) == 0) &&
          (l1e_get_flags(pte) & _PAGE_PRESENT) )
         mfn = l1e_get_pfn(pte);
@@ -285,7 +288,6 @@
 
     return mfn;
 }
-#define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
 
 #ifdef MEMORY_GUARD
 void memguard_init(void);
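The mm.h hunks above replace open-coded indexing of the M2P and P2M tables
with set_pfn_from_mfn()/get_pfn_from_mfn()/set_mfn_from_pfn() accessors,
matching the renames throughout this changeset. A simplified, self-contained
model of the invariant the two tables maintain; plain arrays stand in for the
real virtual-address-mapped tables, and the translated-guest
__copy_from_user() path is deliberately omitted.

/* Toy model: M2P (machine->pseudo-physical) and P2M (the reverse) must be
 * updated as a pair when a frame is assigned to a translated guest. */
#include <assert.h>

#define NFRAMES           1024
#define INVALID_M2P_ENTRY (~0U)

static unsigned int  m2p[NFRAMES];  /* stand-in for machine_to_phys_mapping */
static unsigned long p2m[NFRAMES];  /* stand-in for phys_to_machine_mapping */

#define set_pfn_from_mfn(mfn, pfn) (m2p[(mfn)] = (pfn))
#define get_pfn_from_mfn(mfn)      (m2p[(mfn)])
#define set_mfn_from_pfn(pfn, mfn) (p2m[(pfn)] = (mfn))

int main(void)
{
    unsigned long mfn = 42, pfn = 7;

    /* Give machine frame 42 to the guest as pseudo-physical frame 7. */
    set_pfn_from_mfn(mfn, pfn);
    set_mfn_from_pfn(pfn, mfn);

    /* The two tables are mutual inverses for every assigned frame. */
    assert(get_pfn_from_mfn(mfn) == pfn);
    assert(p2m[get_pfn_from_mfn(mfn)] == mfn);
    return 0;
}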
diff -r 9225c3f597db -r 1fc6473ecc01 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Tue Aug 30 20:02:59 2005
+++ b/xen/include/asm-x86/shadow.h      Tue Aug 30 20:03:51 2005
@@ -269,14 +269,14 @@
 
 #define __mfn_to_gpfn(_d, mfn)                  \
     ( (shadow_mode_translate(_d))               \
-      ? machine_to_phys_mapping[(mfn)]          \
+      ? get_pfn_from_mfn(mfn)                   \
       : (mfn) )
 
 #define __gpfn_to_mfn(_d, gpfn)                 \
     ({                                          \
         ASSERT(current->domain == (_d));        \
         (shadow_mode_translate(_d))             \
-        ? phys_to_machine_mapping(gpfn)         \
+        ? get_mfn_from_pfn(gpfn)                \
         : (gpfn);                               \
     })
 
@@ -461,7 +461,7 @@
     // This wants the nice compact set of PFNs from 0..domain's max,
    // which __mfn_to_gpfn() only returns for translated domains.
     //
-    pfn = machine_to_phys_mapping[mfn];
+    pfn = get_pfn_from_mfn(mfn);
 
     /*
      * Values with the MSB set denote MFNs that aren't really part of the
@@ -562,7 +562,7 @@
     old_hl2e = v->arch.hl2_vtable[index];
 
     if ( (l2e_get_flags(gl2e) & _PAGE_PRESENT) &&
-         VALID_MFN(mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e))) )
+         VALID_MFN(mfn = get_mfn_from_pfn(l2e_get_pfn(gl2e))) )
         new_hl2e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
     else
         new_hl2e = l1e_empty();
diff -r 9225c3f597db -r 1fc6473ecc01 xen/include/asm-x86/shadow_64.h
--- a/xen/include/asm-x86/shadow_64.h   Tue Aug 30 20:02:59 2005
+++ b/xen/include/asm-x86/shadow_64.h   Tue Aug 30 20:03:51 2005
@@ -138,7 +138,7 @@
             return NULL;
         mfn = entry_get_value(*le_e) >> PAGE_SHIFT;
         if ((flag & GUEST_ENTRY) && shadow_mode_translate(d))
-            mfn = phys_to_machine_mapping(mfn);
+            mfn = get_mfn_from_pfn(mfn);
         le_p = (pgentry_64_t *)phys_to_virt(mfn << PAGE_SHIFT);
         index = table_offset_64(va, (level + i - 1));
         le_e = &le_p[index];
@@ -257,7 +257,7 @@
         if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
             return NULL;
 
-        l1mfn = phys_to_machine_mapping(
+        l1mfn = get_mfn_from_pfn(
           l2e_get_pfn(gl2e));
 
         l1va = (l1_pgentry_32_t *)
@@ -299,7 +299,7 @@
             return NULL;
 
-        l1mfn = phys_to_machine_mapping(
+        l1mfn = get_mfn_from_pfn(
           l2e_get_pfn(gl2e));
 
         l1va = (l1_pgentry_32_t *) phys_to_virt(
           l1mfn << L1_PAGETABLE_SHIFT);
diff -r 9225c3f597db -r 1fc6473ecc01 xen/include/asm-x86/vmx_platform.h
--- a/xen/include/asm-x86/vmx_platform.h        Tue Aug 30 20:02:59 2005
+++ b/xen/include/asm-x86/vmx_platform.h        Tue Aug 30 20:03:51 2005
@@ -91,6 +91,6 @@
 extern void vmx_io_assist(struct vcpu *v);
 
 // XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame.
-#define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT)))
+#define mmio_space(gpa) (!VALID_MFN(get_mfn_from_pfn((gpa) >> PAGE_SHIFT)))
 
 #endif
diff -r 9225c3f597db -r 1fc6473ecc01 xen/include/xen/perfc.h
--- a/xen/include/xen/perfc.h   Tue Aug 30 20:02:59 2005
+++ b/xen/include/xen/perfc.h   Tue Aug 30 20:03:51 2005
@@ -4,6 +4,7 @@
 
 #ifdef PERF_COUNTERS
 
+#include <xen/lib.h>
 #include <asm/atomic.h>
 
 /*
@@ -87,7 +88,7 @@
  * Histogram: special treatment for 0 and 1 count. After that equally spaced
  * with last bucket taking the rest.
  */
-#ifdef PERFC_ARRAYS
+#ifdef PERF_ARRAYS
 #define perfc_incr_histo(_x,_v,_n)                                      \
     do {                                                                \
         if ( (_v) == 0 )                                                \

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog