
[Xen-changelog] [IA64] Remove mmzone.h and warning: "MAX_ORDER" redefined



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 20c1a71383a95cb6e7d04fd0c5f30faaf3c41350
# Parent  294e032f14afb76247a1e7ae3ac6c372cf8585a9
[IA64] Remove mmzone.h and warning: "MAX_ORDER" redefined

This patch removes xen/include/asm-ia64/linux/mmzone.h, which is no
longer used, and fixes the "MAX_ORDER" redefined warning in
xen/common/page_alloc.c.

Signed-off-by: Masaki Kanno <kanno.masaki@xxxxxxxxxxxxxx>
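
For context on the warning itself: it fires because the same object-like
macro ends up defined twice with different bodies. Xen defines MAX_ORDER
for its own buddy allocator, and the stale asm-ia64 copy of
linux/mmzone.h (deleted below) defined it again with the Linux default
of 11, so compiling xen/common/page_alloc.c on IA64 saw both definitions
and tripped GCC's preprocessor warning. A minimal sketch of the
collision, using illustrative values and file names rather than the
tree's actual headers:

/* redefined.c - reproduces the class of warning this patch removes.
 * Values are stand-ins: the first definition plays the role of Xen's
 * own MAX_ORDER, the second the copy from linux/mmzone.h.
 */
#define MAX_ORDER 20
#define MAX_ORDER 11    /* gcc: warning: "MAX_ORDER" redefined */

/* Guarding the duplicate would also silence the warning; this patch
 * instead deletes the unused header outright:
 *
 *   #ifndef MAX_ORDER
 *   #define MAX_ORDER 11
 *   #endif
 */

int main(void)
{
        return MAX_ORDER;       /* the latest definition wins: 11 */
}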

diff -r 294e032f14af -r 20c1a71383a9 xen/include/asm-ia64/linux-xen/linux/gfp.h
--- a/xen/include/asm-ia64/linux-xen/linux/gfp.h        Wed Mar  8 00:08:20 2006
+++ b/xen/include/asm-ia64/linux-xen/linux/gfp.h        Wed Mar  8 00:12:39 2006
@@ -3,6 +3,7 @@
 
 #ifdef XEN
 #include <asm/bitops.h>
+#include <linux/topology.h>
 #endif
 #include <linux/mmzone.h>
 #include <linux/stddef.h>
@@ -85,6 +86,7 @@
 static inline void arch_free_page(struct page *page, int order) { }
 #endif
 
+#ifndef XEN
 extern struct page *
 FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
 
@@ -117,6 +119,7 @@
 #define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
+#endif /* XEN */
 
 extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask,
                                               unsigned int order));
 extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));
diff -r 294e032f14af -r 20c1a71383a9 xen/include/asm-ia64/linux/README.origin
--- a/xen/include/asm-ia64/linux/README.origin  Wed Mar  8 00:08:20 2006
+++ b/xen/include/asm-ia64/linux/README.origin  Wed Mar  8 00:12:39 2006
@@ -14,7 +14,6 @@
 jiffies.h              -> linux/include/linux/jiffies.h
 kmalloc_sizes.h                -> linux/include/linux/kmalloc_sizes.h
 linkage.h              -> linux/include/linux/linkage.h
-mmzone.h               -> linux/include/linux/mmzone.h
 notifier.h             -> linux/include/linux/notifier.h
 numa.h                 -> linux/include/linux/numa.h
 page-flags.h           -> linux/include/linux/page-flags.h
diff -r 294e032f14af -r 20c1a71383a9 xen/include/asm-ia64/linux/mmzone.h
--- a/xen/include/asm-ia64/linux/mmzone.h       Wed Mar  8 00:08:20 2006
+++ /dev/null   Wed Mar  8 00:12:39 2006
@@ -1,592 +0,0 @@
-#ifndef _LINUX_MMZONE_H
-#define _LINUX_MMZONE_H
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/config.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/cache.h>
-#include <linux/threads.h>
-#include <linux/numa.h>
-#include <linux/init.h>
-#include <asm/atomic.h>
-
-/* Free memory management - zoned buddy allocator.  */
-#ifndef CONFIG_FORCE_MAX_ZONEORDER
-#define MAX_ORDER 11
-#else
-#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
-#endif
-
-struct free_area {
-       struct list_head        free_list;
-       unsigned long           nr_free;
-};
-
-struct pglist_data;
-
-/*
- * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
- * So add a wild amount of padding here to ensure that they fall into separate
- * cachelines.  There are very few zone structures in the machine, so space
- * consumption is not a concern here.
- */
-#if defined(CONFIG_SMP)
-struct zone_padding {
-       char x[0];
-} ____cacheline_maxaligned_in_smp;
-#define ZONE_PADDING(name)     struct zone_padding name;
-#else
-#define ZONE_PADDING(name)
-#endif
-
-struct per_cpu_pages {
-       int count;              /* number of pages in the list */
-       int low;                /* low watermark, refill needed */
-       int high;               /* high watermark, emptying needed */
-       int batch;              /* chunk size for buddy add/remove */
-       struct list_head list;  /* the list of pages */
-};
-
-struct per_cpu_pageset {
-       struct per_cpu_pages pcp[2];    /* 0: hot.  1: cold */
-#ifdef CONFIG_NUMA
-       unsigned long numa_hit;         /* allocated in intended node */
-       unsigned long numa_miss;        /* allocated in non intended node */
-       unsigned long numa_foreign;     /* was intended here, hit elsewhere */
-       unsigned long interleave_hit;   /* interleaver prefered this zone */
-       unsigned long local_node;       /* allocation from local node */
-       unsigned long other_node;       /* allocation from other node */
-#endif
-} ____cacheline_aligned_in_smp;
-
-#ifdef CONFIG_NUMA
-#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
-#else
-#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
-#endif
-
-#define ZONE_DMA               0
-#define ZONE_NORMAL            1
-#define ZONE_HIGHMEM           2
-
-#define MAX_NR_ZONES           3       /* Sync this with ZONES_SHIFT */
-#define ZONES_SHIFT            2       /* ceil(log2(MAX_NR_ZONES)) */
-
-
-/*
- * When a memory allocation must conform to specific limitations (such
- * as being suitable for DMA) the caller will pass in hints to the
- * allocator in the gfp_mask, in the zone modifier bits.  These bits
- * are used to select a priority ordered list of memory zones which
- * match the requested limits.  GFP_ZONEMASK defines which bits within
- * the gfp_mask should be considered as zone modifiers.  Each valid
- * combination of the zone modifier bits has a corresponding list
- * of zones (in node_zonelists).  Thus for two zone modifiers there
- * will be a maximum of 4 (2 ** 2) zonelists, for 3 modifiers there will
- * be 8 (2 ** 3) zonelists.  GFP_ZONETYPES defines the number of possible
- * combinations of zone modifiers in "zone modifier space".
- */
-#define GFP_ZONEMASK   0x03
-/*
- * As an optimisation any zone modifier bits which are only valid when
- * no other zone modifier bits are set (loners) should be placed in
- * the highest order bits of this field.  This allows us to reduce the
- * extent of the zonelists thus saving space.  For example in the case
- * of three zone modifier bits, we could require up to eight zonelists.
- * If the left most zone modifier is a "loner" then the highest valid
- * zonelist would be four allowing us to allocate only five zonelists.
- * Use the first form when the left most bit is not a "loner", otherwise
- * use the second.
- */
-/* #define GFP_ZONETYPES       (GFP_ZONEMASK + 1) */           /* Non-loner */
-#define GFP_ZONETYPES  ((GFP_ZONEMASK + 1) / 2 + 1)            /* Loner */
-
-/*
- * On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a PC we have 3 zones:
- *
- * ZONE_DMA      < 16 MB       ISA DMA capable memory
- * ZONE_NORMAL 16-896 MB       direct mapped by the kernel
- * ZONE_HIGHMEM         > 896 MB       only page cache and user processes
- */
-
-struct zone {
-       /* Fields commonly accessed by the page allocator */
-       unsigned long           free_pages;
-       unsigned long           pages_min, pages_low, pages_high;
-       /*
-        * We don't know if the memory that we're going to allocate will be freeable
-        * or/and it will be released eventually, so to avoid totally wasting several
-        * GB of ram we must reserve some of the lower zone memory (otherwise we risk
-        * to run OOM on the lower zones despite there's tons of freeable ram
-        * on the higher zones). This array is recalculated at runtime if the
-        * sysctl_lowmem_reserve_ratio sysctl changes.
-        */
-       unsigned long           lowmem_reserve[MAX_NR_ZONES];
-
-#ifdef CONFIG_NUMA
-       struct per_cpu_pageset  *pageset[NR_CPUS];
-#else
-       struct per_cpu_pageset  pageset[NR_CPUS];
-#endif
-       /*
-        * free areas of different sizes
-        */
-       spinlock_t              lock;
-       struct free_area        free_area[MAX_ORDER];
-
-
-       ZONE_PADDING(_pad1_)
-
-       /* Fields commonly accessed by the page reclaim scanner */
-       spinlock_t              lru_lock;       
-       struct list_head        active_list;
-       struct list_head        inactive_list;
-       unsigned long           nr_scan_active;
-       unsigned long           nr_scan_inactive;
-       unsigned long           nr_active;
-       unsigned long           nr_inactive;
-       unsigned long           pages_scanned;     /* since last reclaim */
-       int                     all_unreclaimable; /* All pages pinned */
-
-       /*
-        * Does the allocator try to reclaim pages from the zone as soon
-        * as it fails a watermark_ok() in __alloc_pages?
-        */
-       int                     reclaim_pages;
-       /* A count of how many reclaimers are scanning this zone */
-       atomic_t                reclaim_in_progress;
-
-       /*
-        * prev_priority holds the scanning priority for this zone.  It is
-        * defined as the scanning priority at which we achieved our reclaim
-        * target at the previous try_to_free_pages() or balance_pgdat()
-        * invokation.
-        *
-        * We use prev_priority as a measure of how much stress page reclaim is
-        * under - it drives the swappiness decision: whether to unmap mapped
-        * pages.
-        *
-        * temp_priority is used to remember the scanning priority at which
-        * this zone was successfully refilled to free_pages == pages_high.
-        *
-        * Access to both these fields is quite racy even on uniprocessor.  But
-        * it is expected to average out OK.
-        */
-       int temp_priority;
-       int prev_priority;
-
-
-       ZONE_PADDING(_pad2_)
-       /* Rarely used or read-mostly fields */
-
-       /*
-        * wait_table           -- the array holding the hash table
-        * wait_table_size      -- the size of the hash table array
-        * wait_table_bits      -- wait_table_size == (1 << wait_table_bits)
-        *
-        * The purpose of all these is to keep track of the people
-        * waiting for a page to become available and make them
-        * runnable again when possible. The trouble is that this
-        * consumes a lot of space, especially when so few things
-        * wait on pages at a given time. So instead of using
-        * per-page waitqueues, we use a waitqueue hash table.
-        *
-        * The bucket discipline is to sleep on the same queue when
-        * colliding and wake all in that wait queue when removing.
-        * When something wakes, it must check to be sure its page is
-        * truly available, a la thundering herd. The cost of a
-        * collision is great, but given the expected load of the
-        * table, they should be so rare as to be outweighed by the
-        * benefits from the saved space.
-        *
-        * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
-        * primary users of these fields, and in mm/page_alloc.c
-        * free_area_init_core() performs the initialization of them.
-        */
-       wait_queue_head_t       * wait_table;
-       unsigned long           wait_table_size;
-       unsigned long           wait_table_bits;
-
-       /*
-        * Discontig memory support fields.
-        */
-       struct pglist_data      *zone_pgdat;
-       struct page             *zone_mem_map;
-       /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
-       unsigned long           zone_start_pfn;
-
-       unsigned long           spanned_pages;  /* total size, including holes */
-       unsigned long           present_pages;  /* amount of memory (excluding holes) */
-
-       /*
-        * rarely used fields:
-        */
-       char                    *name;
-} ____cacheline_maxaligned_in_smp;
-
-
-/*
- * The "priority" of VM scanning is how much of the queues we will scan in one
- * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
- * queues ("queue_length >> 12") during an aging round.
- */
-#define DEF_PRIORITY 12
-
-/*
- * One allocation request operates on a zonelist. A zonelist
- * is a list of zones, the first one is the 'goal' of the
- * allocation, the other zones are fallback zones, in decreasing
- * priority.
- *
- * Right now a zonelist takes up less than a cacheline. We never
- * modify it apart from boot-up, and only a few indices are used,
- * so despite the zonelist table being relatively big, the cache
- * footprint of this construct is very small.
- */
-struct zonelist {
-       struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
-};
-
-
-/*
- * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
- * (mostly NUMA machines?) to denote a higher-level memory zone than the
- * zone denotes.
- *
- * On NUMA machines, each NUMA node would have a pg_data_t to describe
- * it's memory layout.
- *
- * Memory statistics and page replacement data structures are maintained on a
- * per-zone basis.
- */
-struct bootmem_data;
-typedef struct pglist_data {
-       struct zone node_zones[MAX_NR_ZONES];
-       struct zonelist node_zonelists[GFP_ZONETYPES];
-       int nr_zones;
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
-       struct page *node_mem_map;
-#endif
-       struct bootmem_data *bdata;
-       unsigned long node_start_pfn;
-       unsigned long node_present_pages; /* total number of physical pages */
-       unsigned long node_spanned_pages; /* total size of physical page
-                                            range, including holes */
-       int node_id;
-       struct pglist_data *pgdat_next;
-       wait_queue_head_t kswapd_wait;
-       struct task_struct *kswapd;
-       int kswapd_max_order;
-} pg_data_t;
-
-#define node_present_pages(nid)        (NODE_DATA(nid)->node_present_pages)
-#define node_spanned_pages(nid)        (NODE_DATA(nid)->node_spanned_pages)
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
-#define pgdat_page_nr(pgdat, pagenr)   ((pgdat)->node_mem_map + (pagenr))
-#else
-#define pgdat_page_nr(pgdat, pagenr)   mfn_to_page((pgdat)->node_start_pfn + (pagenr))
-#endif
-#define nid_page_nr(nid, pagenr)       pgdat_page_nr(NODE_DATA(nid),(pagenr))
-
-extern struct pglist_data *pgdat_list;
-
-void __get_zone_counts(unsigned long *active, unsigned long *inactive,
-                       unsigned long *free, struct pglist_data *pgdat);
-void get_zone_counts(unsigned long *active, unsigned long *inactive,
-                       unsigned long *free);
-void build_all_zonelists(void);
-void wakeup_kswapd(struct zone *zone, int order);
-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-               int alloc_type, int can_try_harder, int gfp_high);
-
-#ifdef CONFIG_HAVE_MEMORY_PRESENT
-void memory_present(int nid, unsigned long start, unsigned long end);
-#else
-static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
-#endif
-
-#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
-unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
-#endif
-
-/*
- * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
- */
-#define zone_idx(zone)         ((zone) - (zone)->zone_pgdat->node_zones)
-
-/**
- * for_each_pgdat - helper macro to iterate over all nodes
- * @pgdat - pointer to a pg_data_t variable
- *
- * Meant to help with common loops of the form
- * pgdat = pgdat_list;
- * while(pgdat) {
- *     ...
- *     pgdat = pgdat->pgdat_next;
- * }
- */
-#define for_each_pgdat(pgdat) \
-       for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next)
-
-/*
- * next_zone - helper magic for for_each_zone()
- * Thanks to William Lee Irwin III for this piece of ingenuity.
- */
-static inline struct zone *next_zone(struct zone *zone)
-{
-       pg_data_t *pgdat = zone->zone_pgdat;
-
-       if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
-               zone++;
-       else if (pgdat->pgdat_next) {
-               pgdat = pgdat->pgdat_next;
-               zone = pgdat->node_zones;
-       } else
-               zone = NULL;
-
-       return zone;
-}
-
-/**
- * for_each_zone - helper macro to iterate over all memory zones
- * @zone - pointer to struct zone variable
- *
- * The user only needs to declare the zone variable, for_each_zone
- * fills it in. This basically means for_each_zone() is an
- * easier to read version of this piece of code:
- *
- * for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
- *     for (i = 0; i < MAX_NR_ZONES; ++i) {
- *             struct zone * z = pgdat->node_zones + i;
- *             ...
- *     }
- * }
- */
-#define for_each_zone(zone) \
-       for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
-
-static inline int is_highmem_idx(int idx)
-{
-       return (idx == ZONE_HIGHMEM);
-}
-
-static inline int is_normal_idx(int idx)
-{
-       return (idx == ZONE_NORMAL);
-}
-/**
- * is_highmem - helper function to quickly check if a struct zone is a 
- *              highmem zone or not.  This is an attempt to keep references
- *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
- * @zone - pointer to struct zone variable
- */
-static inline int is_highmem(struct zone *zone)
-{
-       return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
-}
-
-static inline int is_normal(struct zone *zone)
-{
-       return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
-}
-
-/* These two functions are used to setup the per zone pages min values */
-struct ctl_table;
-struct file;
-int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *, 
-                                       void __user *, size_t *, loff_t *);
-extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
-int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
-                                       void __user *, size_t *, loff_t *);
-
-#include <linux/topology.h>
-/* Returns the number of the current Node. */
-#define numa_node_id()         (cpu_to_node(raw_smp_processor_id()))
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-
-extern struct pglist_data contig_page_data;
-#define NODE_DATA(nid)         (&contig_page_data)
-#define NODE_MEM_MAP(nid)      mem_map
-#define MAX_NODES_SHIFT                1
-#define pfn_to_nid(pfn)                (0)
-
-#else /* CONFIG_NEED_MULTIPLE_NODES */
-
-#include <asm/mmzone.h>
-
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
-
-#ifdef CONFIG_SPARSEMEM
-#include <asm/sparsemem.h>
-#endif
-
-#if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED)
-/*
- * with 32 bit page->flags field, we reserve 8 bits for node/zone info.
- * there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes.
- */
-#define FLAGS_RESERVED         8
-
-#elif BITS_PER_LONG == 64
-/*
- * with 64 bit flags field, there's plenty of room.
- */
-#define FLAGS_RESERVED         32
-
-#else
-
-#error BITS_PER_LONG not defined
-
-#endif
-
-#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
-#define early_pfn_to_nid(nid)  (0UL)
-#endif
-
-#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
-#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
-
-#ifdef CONFIG_SPARSEMEM
-
-/*
- * SECTION_SHIFT               #bits space required to store a section #
- *
- * PA_SECTION_SHIFT            physical address to/from section number
- * PFN_SECTION_SHIFT           pfn to/from section number
- */
-#define SECTIONS_SHIFT         (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
-
-#define PA_SECTION_SHIFT       (SECTION_SIZE_BITS)
-#define PFN_SECTION_SHIFT      (SECTION_SIZE_BITS - PAGE_SHIFT)
-
-#define NR_MEM_SECTIONS                (1UL << SECTIONS_SHIFT)
-
-#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
-#define PAGE_SECTION_MASK      (~(PAGES_PER_SECTION-1))
-
-#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
-#error Allocator MAX_ORDER exceeds SECTION_SIZE
-#endif
-
-struct page;
-struct mem_section {
-       /*
-        * This is, logically, a pointer to an array of struct
-        * pages.  However, it is stored with some other magic.
-        * (see sparse.c::sparse_init_one_section())
-        *
-        * Making it a UL at least makes someone do a cast
-        * before using it wrong.
-        */
-       unsigned long section_mem_map;
-};
-
-extern struct mem_section mem_section[NR_MEM_SECTIONS];
-
-static inline struct mem_section *__nr_to_section(unsigned long nr)
-{
-       return &mem_section[nr];
-}
-
-/*
- * We use the lower bits of the mem_map pointer to store
- * a little bit of information.  There should be at least
- * 3 bits here due to 32-bit alignment.
- */
-#define        SECTION_MARKED_PRESENT  (1UL<<0)
-#define SECTION_HAS_MEM_MAP    (1UL<<1)
-#define SECTION_MAP_LAST_BIT   (1UL<<2)
-#define SECTION_MAP_MASK       (~(SECTION_MAP_LAST_BIT-1))
-
-static inline struct page *__section_mem_map_addr(struct mem_section *section)
-{
-       unsigned long map = section->section_mem_map;
-       map &= SECTION_MAP_MASK;
-       return (struct page *)map;
-}
-
-static inline int valid_section(struct mem_section *section)
-{
-       return (section->section_mem_map & SECTION_MARKED_PRESENT);
-}
-
-static inline int section_has_mem_map(struct mem_section *section)
-{
-       return (section->section_mem_map & SECTION_HAS_MEM_MAP);
-}
-
-static inline int valid_section_nr(unsigned long nr)
-{
-       return valid_section(__nr_to_section(nr));
-}
-
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr)   pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
-
-static inline struct mem_section *__pfn_to_section(unsigned long pfn)
-{
-       return __nr_to_section(pfn_to_section_nr(pfn));
-}
-
-#define mfn_to_page(pfn)                                               \
-({                                                                     \
-       unsigned long __pfn = (pfn);                                    \
-       __section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn;        \
-})
-#define page_to_mfn(page)                                              \
-({                                                                     \
-       page - __section_mem_map_addr(__nr_to_section(                  \
-               page_to_section(page)));                                \
-})
-
-static inline int mfn_valid(unsigned long pfn)
-{
-       if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
-               return 0;
-       return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
-}
-
-/*
- * These are _only_ used during initialisation, therefore they
- * can use __initdata ...  They could have names to indicate
- * this restriction.
- */
-#ifdef CONFIG_NUMA
-#define pfn_to_nid             early_pfn_to_nid
-#endif
-
-#define pfn_to_pgdat(pfn)                                              \
-({                                                                     \
-       NODE_DATA(pfn_to_nid(pfn));                                     \
-})
-
-#define early_mfn_valid(pfn)   mfn_valid(pfn)
-void sparse_init(void);
-#else
-#define sparse_init()  do {} while (0)
-#endif /* CONFIG_SPARSEMEM */
-
-#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-#define early_pfn_in_nid(pfn, nid)     (early_pfn_to_nid(pfn) == (nid))
-#else
-#define early_pfn_in_nid(pfn, nid)     (1)
-#endif
-
-#ifndef early_mfn_valid
-#define early_mfn_valid(pfn)   (1)
-#endif
-
-void memory_present(int nid, unsigned long start, unsigned long end);
-unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
-
-#endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
-#endif /* _LINUX_MMZONE_H */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog