
[Xen-changelog] [IA64] Changed from page to page_info



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID d75f733f328c9b5df761c442f6450f904ab1b094
# Parent  9c0123c8a1b4de9414001876a262428ca61dc34c
[IA64] Changed from page to page_info

This patch changes "struct page" to "struct page_info" in the following files:
 - xen/arch/ia64/xen/domain.c
 - xen/arch/ia64/xen/mm_init.c
 - xen/arch/ia64/xen/xenmem.c
 - xen/arch/ia64/xen/xenmisc.c
 - xen/include/asm-ia64/config.h
 - xen/include/asm-ia64/mm.h

This patch is "step1" which we showed by the following mail.
http://lists.xensource.com/archives/html/xen-ia64-devel/2006-03/msg00305.html

Signed-off-by: Akio Takebe <takebe_akio@xxxxxxxxxxxxxx>
Signed-off-by: Masaki Kanno <kanno.masaki@xxxxxxxxxxxxxx>
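
For anyone following the conversion plan, here is a minimal, self-contained
sketch (not part of the patch, and using the hypothetical names "frame" and
"frame_info" rather than Xen's "page"/"page_info") of the staging pattern the
series relies on: a preprocessor alias lets files be converted one at a time
while everything keeps compiling.

/* sketch only -- hypothetical names, not Xen code */
#include <stdio.h>

/* Transitional alias, mirroring "#define page_info page" in
 * xen/include/asm-ia64/config.h: while it is in effect, the new
 * spelling is rewritten to the old one, so both name the same type. */
#define frame_info frame

struct frame_info {                     /* preprocessor sees: struct frame */
        unsigned long flags;
};

static void show(struct frame_info *f)  /* i.e. struct frame * */
{
        printf("flags = %lu\n", f->flags);
}

int main(void)
{
        struct frame demo = { 42 };     /* old spelling, same type */
        show(&demo);
        return 0;
}

Once every file uses the new spelling, the alias can be deleted and the
rename becomes real; that is the end state this series works toward.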

diff -r 9c0123c8a1b4 -r d75f733f328c xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Thu Mar 16 19:06:48 2006
+++ b/xen/arch/ia64/xen/domain.c        Thu Mar 16 19:10:22 2006
@@ -80,14 +80,14 @@
 /* this belongs in include/asm, but there doesn't seem to be a suitable place */
 void arch_domain_destroy(struct domain *d)
 {
-       struct page *page;
+       struct page_info *page;
        struct list_head *ent, *prev;
 
        if (d->arch.mm->pgd != NULL)
        {
                list_for_each ( ent, &d->arch.mm->pt_list )
                {
-                       page = list_entry(ent, struct page, list);
+                       page = list_entry(ent, struct page_info, list);
                        prev = ent->prev;
                        list_del(ent);
                        free_xenheap_page(page_to_virt(page));
@@ -340,7 +340,7 @@
 static void relinquish_memory(struct domain *d, struct list_head *list)
 {
     struct list_head *ent;
-    struct page      *page;
+    struct page_info *page;
 #ifndef __ia64__
     unsigned long     x, y;
 #endif
@@ -350,7 +350,7 @@
     ent = list->next;
     while ( ent != list )
     {
-        page = list_entry(ent, struct page, list);
+        page = list_entry(ent, struct page_info, list);
         /* Grab a reference to the page so it won't disappear from under us. */
         if ( unlikely(!get_page(page, d)) )
         {
@@ -468,7 +468,7 @@
        }
 }
 
-static struct page * assign_new_domain0_page(unsigned long mpaddr)
+static struct page_info * assign_new_domain0_page(unsigned long mpaddr)
 {
        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
                printk("assign_new_domain0_page: bad domain0 mpaddr 
0x%lx!\n",mpaddr);
@@ -480,10 +480,10 @@
 }
 
 /* allocate new page for domain and map it to the specified metaphysical addr */
-struct page * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
+struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
 {
        struct mm_struct *mm = d->arch.mm;
-       struct page *pt, *p = (struct page *)0;
+       struct page_info *pt, *p = (struct page_info *)0;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
@@ -549,7 +549,7 @@
 void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
 {
        struct mm_struct *mm = d->arch.mm;
-       struct page *pt;
+       struct page_info *pt;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
@@ -746,7 +746,7 @@
        Elf_Phdr phdr;
        int h, filesz, memsz;
        unsigned long elfaddr, dom_mpaddr, dom_imva;
-       struct page *p;
+       struct page_info *p;
   
        copy_memory(&ehdr, (void *) image_start, sizeof(Elf_Ehdr));
        for ( h = 0; h < ehdr.e_phnum; h++ ) {
diff -r 9c0123c8a1b4 -r d75f733f328c xen/arch/ia64/xen/mm_init.c
--- a/xen/arch/ia64/xen/mm_init.c       Thu Mar 16 19:06:48 2006
+++ b/xen/arch/ia64/xen/mm_init.c       Thu Mar 16 19:10:22 2006
@@ -60,13 +60,13 @@
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 unsigned long vmalloc_end = VMALLOC_END_INIT;
 EXPORT_SYMBOL(vmalloc_end);
-struct page *vmem_map;
+struct page_info *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
 
 // static int pgt_cache_water[2] = { 25, 50 };
 
-struct page *zero_page_memmap_ptr;             /* map entry for zero page */
+struct page_info *zero_page_memmap_ptr;        /* map entry for zero page */
 EXPORT_SYMBOL(zero_page_memmap_ptr);
 
 #ifdef XEN
@@ -172,7 +172,7 @@
 pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
        if (!pmd_present(*pmd)) {
-               struct page *new;
+               struct page_info *new;
 
                spin_unlock(&mm->page_table_lock);
                new = pte_alloc_one(mm, address);
@@ -202,7 +202,7 @@
 update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
 {
        unsigned long addr;
-       struct page *page;
+       struct page_info *page;
 
        if (!pte_exec(pte))
                return;                         /* not an executable page... */
@@ -386,7 +386,7 @@
 create_mem_map_page_table (u64 start, u64 end, void *arg)
 {
        unsigned long address, start_page, end_page;
-       struct page *map_start, *map_end;
+       struct page_info *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pmd_t *pmd;
@@ -417,8 +417,8 @@
 }
 
 struct memmap_init_callback_data {
-       struct page *start;
-       struct page *end;
+       struct page_info *start;
+       struct page_info *end;
        int nid;
        unsigned long zone;
 };
@@ -427,7 +427,7 @@
 virtual_memmap_init (u64 start, u64 end, void *arg)
 {
        struct memmap_init_callback_data *args;
-       struct page *map_start, *map_end;
+       struct page_info *map_start, *map_end;
 
        args = (struct memmap_init_callback_data *) arg;
 
@@ -440,13 +440,13 @@
                map_end = args->end;
 
        /*
-        * We have to initialize "out of bounds" struct page elements that fit completely
+        * We have to initialize "out of bounds" struct page_info elements that fit completely
         * on the same pages that were allocated for the "in bounds" elements because they
         * may be referenced later (and found to be "reserved").
         */
-       map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
+       map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page_info);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
-                   / sizeof(struct page));
+                   / sizeof(struct page_info));
 
        if (map_start < map_end)
                memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
@@ -455,7 +455,7 @@
 }
 
 void
-memmap_init (struct page *start, unsigned long size, int nid,
+memmap_init (struct page_info *start, unsigned long size, int nid,
             unsigned long zone, unsigned long start_pfn)
 {
        if (!vmem_map)
@@ -476,7 +476,7 @@
 ia64_mfn_valid (unsigned long pfn)
 {
        char byte;
-       struct page *pg = mfn_to_page(pfn);
+       struct page_info *pg = mfn_to_page(pfn);
 
        return     (__get_user(byte, (char *) pg) == 0)
                && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
diff -r 9c0123c8a1b4 -r d75f733f328c xen/arch/ia64/xen/xenmem.c
--- a/xen/arch/ia64/xen/xenmem.c        Thu Mar 16 19:06:48 2006
+++ b/xen/arch/ia64/xen/xenmem.c        Thu Mar 16 19:10:22 2006
@@ -13,12 +13,12 @@
 #include <asm/pgtable.h>
 #include <xen/mm.h>
 
-extern struct page *zero_page_memmap_ptr;
+extern struct page_info *zero_page_memmap_ptr;
 struct page_info *frame_table;
 unsigned long frame_table_size;
 unsigned long max_page;
 
-struct page *mem_map;
+struct page_info *mem_map;
 #define MAX_DMA_ADDRESS ~0UL   // FIXME???
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
diff -r 9c0123c8a1b4 -r d75f733f328c xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c       Thu Mar 16 19:06:48 2006
+++ b/xen/arch/ia64/xen/xenmisc.c       Thu Mar 16 19:10:22 2006
@@ -171,7 +171,7 @@
        return (unsigned long)p;
 }
 
-void __free_pages(struct page *page, unsigned int order)
+void __free_pages(struct page_info *page, unsigned int order)
 {
        if (order) BUG();
        free_xenheap_page(page);
diff -r 9c0123c8a1b4 -r d75f733f328c xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h     Thu Mar 16 19:06:48 2006
+++ b/xen/include/asm-ia64/config.h     Thu Mar 16 19:10:22 2006
@@ -73,8 +73,11 @@
 extern unsigned long dom0_start;
 extern unsigned long dom0_size;
 
+// see include/asm-ia64/mm.h, handle remaining page_info uses until gone
+#define page_info page
+
 // from linux/include/linux/mm.h
-extern struct page *mem_map;
+extern struct page_info *mem_map;
 
 // xen/include/asm/config.h
 extern char _end[]; /* standard ELF symbol */
@@ -134,9 +137,6 @@
 #define smp_num_siblings 1
 #endif
 
-// from linux/include/linux/mm.h
-struct page;
-
 // function calls; see decl in xen/include/xen/sched.h
 #undef free_task_struct
 #undef alloc_task_struct
@@ -206,8 +206,6 @@
 #define _atomic_read(v) ((v).counter)
 #define atomic_compareandswap(old, new, v) ((atomic_t){ cmpxchg(v, _atomic_read(old), _atomic_read(new)) })
 
-// see include/asm-ia64/mm.h, handle remaining page_info uses until gone
-#define page_info page
 // Deprivated linux inf and put here for short time compatibility
 #define kmalloc(s, t) xmalloc_bytes((s))
 #define kfree(s) xfree((s))
diff -r 9c0123c8a1b4 -r d75f733f328c xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Thu Mar 16 19:06:48 2006
+++ b/xen/include/asm-ia64/mm.h Thu Mar 16 19:10:22 2006
@@ -36,7 +36,7 @@
 
 #define PRtype_info "08x"
 
-struct page
+struct page_info
 {
     /* Each frame can be threaded onto a doubly-linked list. */
     struct list_head list;
@@ -228,7 +228,7 @@
 
 // prototype of misc memory stuff
 //unsigned long __get_free_pages(unsigned int mask, unsigned int order);
-//void __free_pages(struct page *page, unsigned int order);
+//void __free_pages(struct page_info *page, unsigned int order);
 void *pgtable_quicklist_alloc(void);
 void pgtable_quicklist_free(void *pgtable_entry);
 
@@ -348,11 +348,11 @@
 #define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
 #define NODEZONE(node, zone)   ((node << ZONES_SHIFT) | zone)
 
-static inline unsigned long page_zonenum(struct page *page)
+static inline unsigned long page_zonenum(struct page_info *page)
 {
        return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
 }
-static inline unsigned long page_to_nid(struct page *page)
+static inline unsigned long page_to_nid(struct page_info *page)
 {
        return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
 }
@@ -360,12 +360,12 @@
 struct zone;
 extern struct zone *zone_table[];
 
-static inline struct zone *page_zone(struct page *page)
+static inline struct zone *page_zone(struct page_info *page)
 {
        return zone_table[page->flags >> NODEZONE_SHIFT];
 }
 
-static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
+static inline void set_page_zone(struct page_info *page, unsigned long nodezone_num)
 {
        page->flags &= ~(~0UL << NODEZONE_SHIFT);
        page->flags |= nodezone_num << NODEZONE_SHIFT;
@@ -376,7 +376,7 @@
 extern unsigned long max_mapnr;
 #endif
 
-static inline void *lowmem_page_address(struct page *page)
+static inline void *lowmem_page_address(struct page_info *page)
 {
        return __va(page_to_mfn(page) << PAGE_SHIFT);
 }
@@ -395,8 +395,8 @@
 #endif
 
 #if defined(HASHED_PAGE_VIRTUAL)
-void *page_address(struct page *page);
-void set_page_address(struct page *page, void *virtual);
+void *page_address(struct page_info *page);
+void set_page_address(struct page_info *page, void *virtual);
 void page_address_init(void);
 #endif
 
@@ -409,7 +409,7 @@
 
 #ifndef CONFIG_DEBUG_PAGEALLOC
 static inline void
-kernel_map_pages(struct page *page, int numpages, int enable)
+kernel_map_pages(struct page_info *page, int numpages, int enable)
 {
 }
 #endif
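
A closing note on the config.h hunks, as far as the diff shows: the
"#define page_info page" shim is moved up, above the mem_map declaration,
rather than dropped, so the alias is already in force when that extern is
preprocessed:

#define page_info page
extern struct page_info *mem_map;  /* the compiler sees: extern struct page *mem_map; */

Until the shim is removed, the underlying type is still tagged "struct page";
converted sources that say "page_info" are mapped back by the preprocessor,
which is presumably also why the "struct page;" forward declaration could be
dropped from config.h.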

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

