
[Xen-changelog] [IA64] another one to remove unused linux files within xen/ia64



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID a2bb5a3242a1449c132cbd64950582d054c2a627
# Parent  b7e2628695e316bfb20c700128cd0551a8d93137
[IA64] another one to remove unused linux files within xen/ia64

This patch removes more unused Linux files from the Xen tree, along with
some duplicated ones such as slab.h and cpumask.h. This should help reduce
duplicate definitions that conflict with Xen's own.

Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
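
For reference, the compatibility idea in the config.h hunk below can be seen
in a minimal user-space sketch: legacy kmalloc()/kfree() call sites keep
compiling once the names are mapped onto Xen's allocator. In this sketch
xmalloc_bytes() and xfree() are only modeled with malloc()/free() so the
example runs stand-alone; in Xen they are the hypervisor's own allocator
entry points, and the gfp-flags argument is simply dropped.

/* Illustrative sketch only: xmalloc_bytes()/xfree() are modeled with
 * malloc()/free() so the example runs in user space.  In Xen they are
 * the hypervisor allocator; the GFP flag argument is ignored. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GFP_KERNEL 0                       /* placeholder flag value */
#define xmalloc_bytes(n) malloc(n)         /* stand-in for Xen's xmalloc_bytes() */
#define xfree(p)         free(p)           /* stand-in for Xen's xfree() */

/* The compatibility macros added to asm-ia64/config.h by this patch: */
#define kmalloc(s, t) xmalloc_bytes((s))
#define kfree(s)      xfree((s))

int main(void)
{
    /* A legacy Linux-style call site compiles unchanged. */
    char *buf = kmalloc(64, GFP_KERNEL);
    if (!buf)
        return 1;
    strcpy(buf, "allocated via the kmalloc() shim");
    puts(buf);
    kfree(buf);
    return 0;
}

Building and running the sketch prints the allocated string, showing that the
Linux-style call site needs no source change once the macros are in place.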

diff -r b7e2628695e3 -r a2bb5a3242a1 xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h     Fri Feb 24 17:32:27 2006
+++ b/xen/include/asm-ia64/config.h     Fri Feb 24 17:50:27 2006
@@ -211,6 +211,9 @@
 
 // see include/asm-ia64/mm.h, handle remaining page_info uses until gone
 #define page_info page
+// Deprecated Linux interfaces, kept here for short-term compatibility
+#define kmalloc(s, t) xmalloc_bytes((s))
+#define kfree(s) xfree((s))
 
 // see common/keyhandler.c
 #define        nop()   asm volatile ("nop 0")
diff -r b7e2628695e3 -r a2bb5a3242a1 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Fri Feb 24 17:32:27 2006
+++ b/xen/include/asm-ia64/domain.h     Fri Feb 24 17:50:27 2006
@@ -9,6 +9,7 @@
 #include <public/arch-ia64.h>
 #include <asm/vmx_platform.h>
 #include <xen/list.h>
+#include <xen/cpumask.h>
 
 extern void domain_relinquish_resources(struct domain *);
 
diff -r b7e2628695e3 -r a2bb5a3242a1 xen/include/asm-ia64/linux-xen/linux/README.origin
--- a/xen/include/asm-ia64/linux-xen/linux/README.origin        Fri Feb 24 17:32:27 2006
+++ b/xen/include/asm-ia64/linux-xen/linux/README.origin        Fri Feb 24 17:50:27 2006
@@ -5,7 +5,6 @@
 # (e.g. with #ifdef XEN or XEN in a comment) so that they can be
 # easily updated to future versions of the corresponding Linux files.
 
-cpumask.h              -> linux/include/linux/cpumask.h
 gfp.h                  -> linux/include/linux/gfp.h
 hardirq.h              -> linux/include/linux/hardirq.h
 interrupt.h            -> linux/include/linux/interrupt.h
diff -r b7e2628695e3 -r a2bb5a3242a1 xen/include/asm-ia64/linux/README.origin
--- a/xen/include/asm-ia64/linux/README.origin  Fri Feb 24 17:32:27 2006
+++ b/xen/include/asm-ia64/linux/README.origin  Fri Feb 24 17:50:27 2006
@@ -23,7 +23,6 @@
 rbtree.h               -> linux/include/linux/rbtree.h
 rwsem.h                        -> linux/include/linux/rwsem.h
 seqlock.h              -> linux/include/linux/seqlock.h
-slab.h                 -> linux/include/linux/slab.h
 sort.h                 -> linux/include/linux/sort.h
 stddef.h               -> linux/include/linux/stddef.h
 thread_info.h          -> linux/include/linux/thread_info.h
diff -r b7e2628695e3 -r a2bb5a3242a1 xen/include/asm-ia64/linux-xen/linux/cpumask.h
--- a/xen/include/asm-ia64/linux-xen/linux/cpumask.h    Fri Feb 24 17:32:27 2006
+++ /dev/null   Fri Feb 24 17:50:27 2006
@@ -1,397 +0,0 @@
-#ifndef __LINUX_CPUMASK_H
-#define __LINUX_CPUMASK_H
-
-/*
- * Cpumasks provide a bitmap suitable for representing the
- * set of CPU's in a system, one bit position per CPU number.
- *
- * See detailed comments in the file linux/bitmap.h describing the
- * data type on which these cpumasks are based.
- *
- * For details of cpumask_scnprintf() and cpumask_parse(),
- * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
- * For details of cpulist_scnprintf() and cpulist_parse(), see
- * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
- *
- * The available cpumask operations are:
- *
- * void cpu_set(cpu, mask)             turn on bit 'cpu' in mask
- * void cpu_clear(cpu, mask)           turn off bit 'cpu' in mask
- * void cpus_setall(mask)              set all bits
- * void cpus_clear(mask)               clear all bits
- * int cpu_isset(cpu, mask)            true iff bit 'cpu' set in mask
- * int cpu_test_and_set(cpu, mask)     test and set bit 'cpu' in mask
- *
- * void cpus_and(dst, src1, src2)      dst = src1 & src2  [intersection]
- * void cpus_or(dst, src1, src2)       dst = src1 | src2  [union]
- * void cpus_xor(dst, src1, src2)      dst = src1 ^ src2
- * void cpus_andnot(dst, src1, src2)   dst = src1 & ~src2
- * void cpus_complement(dst, src)      dst = ~src
- *
- * int cpus_equal(mask1, mask2)                Does mask1 == mask2?
- * int cpus_intersects(mask1, mask2)   Do mask1 and mask2 intersect?
- * int cpus_subset(mask1, mask2)       Is mask1 a subset of mask2?
- * int cpus_empty(mask)                        Is mask empty (no bits sets)?
- * int cpus_full(mask)                 Is mask full (all bits sets)?
- * int cpus_weight(mask)               Hamming weigh - number of set bits
- *
- * void cpus_shift_right(dst, src, n)  Shift right
- * void cpus_shift_left(dst, src, n)   Shift left
- *
- * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
- * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
- *
- * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
- * CPU_MASK_ALL                                Initializer - all bits set
- * CPU_MASK_NONE                       Initializer - no bits set
- * unsigned long *cpus_addr(mask)      Array of unsigned long's in mask
- *
- * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
- * int cpumask_parse(ubuf, ulen, mask) Parse ascii string as cpumask
- * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
- * int cpulist_parse(buf, map)         Parse ascii string as cpulist
- *
- * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask
- *
- * int num_online_cpus()               Number of online CPUs
- * int num_possible_cpus()             Number of all possible CPUs
- * int num_present_cpus()              Number of present CPUs
- *
- * int cpu_online(cpu)                 Is some cpu online?
- * int cpu_possible(cpu)               Is some cpu possible?
- * int cpu_present(cpu)                        Is some cpu present (can schedule)?
- *
- * int any_online_cpu(mask)            First online cpu in mask
- *
- * for_each_cpu(cpu)                   for-loop cpu over cpu_possible_map
- * for_each_online_cpu(cpu)            for-loop cpu over cpu_online_map
- * for_each_present_cpu(cpu)           for-loop cpu over cpu_present_map
- *
- * Subtlety:
- * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
- *    to generate slightly worse code.  Note for example the additional
- *    40 lines of assembly code compiling the "for each possible cpu"
- *    loops buried in the disk_stat_read() macros calls when compiling
- *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
- *    one-line #define for cpu_isset(), instead of wrapping an inline
- *    inside a macro, the way we do the other calls.
- */
-
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/bitmap.h>
-#include <asm/bug.h>
-
-typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
-extern cpumask_t _unused_cpumask_arg_;
-
-#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
-static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
-{
-       set_bit(cpu, dstp->bits);
-}
-
-#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
-static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
-{
-       clear_bit(cpu, dstp->bits);
-}
-
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, int nbits)
-{
-       bitmap_fill(dstp->bits, nbits);
-}
-
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, int nbits)
-{
-       bitmap_zero(dstp->bits, nbits);
-}
-
-/* No static inline type checking - see Subtlety (1) above. */
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
-{
-       return test_and_set_bit(cpu, addr->bits);
-}
-
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_andnot(dst, src1, src2) \
-                               __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
-static inline void __cpus_complement(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int nbits)
-{
-       bitmap_complement(dstp->bits, srcp->bits, nbits);
-}
-
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_equal(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       return bitmap_equal(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_intersects(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       return bitmap_intersects(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_subset(const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
-{
-       return bitmap_subset(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
-{
-       return bitmap_empty(srcp->bits, nbits);
-}
-
-#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
-static inline int __cpus_full(const cpumask_t *srcp, int nbits)
-{
-       return bitmap_full(srcp->bits, nbits);
-}
-
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
-{
-       return bitmap_weight(srcp->bits, nbits);
-}
-
-#define cpus_shift_right(dst, src, n) \
-                       __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_right(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int n, int nbits)
-{
-       bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
-}
-
-#define cpus_shift_left(dst, src, n) \
-                       __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int n, int nbits)
-{
-       bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-
-#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
-static inline int __first_cpu(const cpumask_t *srcp, int nbits)
-{
-       return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
-}
-
-#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
-static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
-{
-       return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
-}
-
-#define cpumask_of_cpu(cpu)                                            \
-({                                                                     \
-       typeof(_unused_cpumask_arg_) m;                                 \
-       if (sizeof(m) == sizeof(unsigned long)) {                       \
-               m.bits[0] = 1UL<<(cpu);                                 \
-       } else {                                                        \
-               cpus_clear(m);                                          \
-               cpu_set((cpu), m);                                      \
-       }                                                               \
-       m;                                                              \
-})
-
-#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
-
-#if NR_CPUS <= BITS_PER_LONG
-
-#define CPU_MASK_ALL                                                   \
-(cpumask_t) { {                                                                \
-       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
-} }
-
-#else
-
-#define CPU_MASK_ALL                                                   \
-(cpumask_t) { {                                                                \
-       [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                        \
-       [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
-} }
-
-#endif
-
-#define CPU_MASK_NONE                                                  \
-(cpumask_t) { {                                                                \
-       [0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL                         \
-} }
-
-#define CPU_MASK_CPU0                                                  \
-(cpumask_t) { {                                                                \
-       [0] =  1UL                                                      \
-} }
-
-#define cpus_addr(src) ((src).bits)
-
-#define cpumask_scnprintf(buf, len, src) \
-                       __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
-static inline int __cpumask_scnprintf(char *buf, int len,
-                                       const cpumask_t *srcp, int nbits)
-{
-       return bitmap_scnprintf(buf, len, srcp->bits, nbits);
-}
-
-#define cpumask_parse(ubuf, ulen, dst) \
-                       __cpumask_parse((ubuf), (ulen), &(dst), NR_CPUS)
-static inline int __cpumask_parse(const char __user *buf, int len,
-                                       cpumask_t *dstp, int nbits)
-{
-       return bitmap_parse(buf, len, dstp->bits, nbits);
-}
-
-#define cpulist_scnprintf(buf, len, src) \
-                       __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
-static inline int __cpulist_scnprintf(char *buf, int len,
-                                       const cpumask_t *srcp, int nbits)
-{
-       return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
-}
-
-#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
-static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
-{
-       return bitmap_parselist(buf, dstp->bits, nbits);
-}
-
-#if NR_CPUS > 1
-#define for_each_cpu_mask(cpu, mask)           \
-       for ((cpu) = first_cpu(mask);           \
-               (cpu) < NR_CPUS;                \
-               (cpu) = next_cpu((cpu), (mask)))
-#else /* NR_CPUS == 1 */
-#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
-#endif /* NR_CPUS */
-
-/*
- * The following particular system cpumasks and operations manage
- * possible, present and online cpus.  Each of them is a fixed size
- * bitmap of size NR_CPUS.
- *
- *  #ifdef CONFIG_HOTPLUG_CPU
- *     cpu_possible_map - all NR_CPUS bits set
- *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
- *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
- *  #else
- *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
- *     cpu_present_map  - copy of cpu_possible_map
- *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
- *  #endif
- *
- *  In either case, NR_CPUS is fixed at compile time, as the static
- *  size of these bitmaps.  The cpu_possible_map is fixed at boot
- *  time, as the set of CPU id's that it is possible might ever
- *  be plugged in at anytime during the life of that system boot.
- *  The cpu_present_map is dynamic(*), representing which CPUs
- *  are currently plugged in.  And cpu_online_map is the dynamic
- *  subset of cpu_present_map, indicating those CPUs available
- *  for scheduling.
- *
- *  If HOTPLUG is enabled, then cpu_possible_map is forced to have
- *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
- *  ACPI reports present at boot.
- *
- *  If HOTPLUG is enabled, then cpu_present_map varies dynamically,
- *  depending on what ACPI reports as currently plugged in, otherwise
- *  cpu_present_map is just a copy of cpu_possible_map.
- *
- *  (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
- *      hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
- *
- * Subtleties:
- * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
- *    assumption that their single CPU is online.  The UP
- *    cpu_{online,possible,present}_maps are placebos.  Changing them
- *    will have no useful affect on the following num_*_cpus()
- *    and cpu_*() macros in the UP case.  This ugliness is a UP
- *    optimization - don't waste any instructions or memory references
- *    asking if you're online or how many CPUs there are if there is
- *    only one CPU.
- * 2) Most SMP arch's #define some of these maps to be some
- *    other map specific to that arch.  Therefore, the following
- *    must be #define macros, not inlines.  To see why, examine
- *    the assembly code produced by the following.  Note that
- *    set1() writes phys_x_map, but set2() writes x_map:
- *        int x_map, phys_x_map;
- *        #define set1(a) x_map = a
- *        inline void set2(int a) { x_map = a; }
- *        #define x_map phys_x_map
- *        main(){ set1(3); set2(5); }
- */
-
-extern cpumask_t cpu_possible_map;
-#ifndef XEN
-extern cpumask_t cpu_online_map;
-#endif
-extern cpumask_t cpu_present_map;
-
-#if NR_CPUS > 1
-#define num_online_cpus()      cpus_weight(cpu_online_map)
-#define num_possible_cpus()    cpus_weight(cpu_possible_map)
-#define num_present_cpus()     cpus_weight(cpu_present_map)
-#define cpu_online(cpu)                cpu_isset((cpu), cpu_online_map)
-#define cpu_possible(cpu)      cpu_isset((cpu), cpu_possible_map)
-#define cpu_present(cpu)       cpu_isset((cpu), cpu_present_map)
-#else
-#define num_online_cpus()      1
-#define num_possible_cpus()    1
-#define num_present_cpus()     1
-#define cpu_online(cpu)                ((cpu) == 0)
-#define cpu_possible(cpu)      ((cpu) == 0)
-#define cpu_present(cpu)       ((cpu) == 0)
-#endif
-
-#define any_online_cpu(mask)                   \
-({                                             \
-       int cpu;                                \
-       for_each_cpu_mask(cpu, (mask))          \
-               if (cpu_online(cpu))            \
-                       break;                  \
-       cpu;                                    \
-})
-
-#define for_each_cpu(cpu)        for_each_cpu_mask((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
-
-#endif /* __LINUX_CPUMASK_H */
diff -r b7e2628695e3 -r a2bb5a3242a1 xen/include/asm-ia64/linux/slab.h
--- a/xen/include/asm-ia64/linux/slab.h Fri Feb 24 17:32:27 2006
+++ /dev/null   Fri Feb 24 17:50:27 2006
@@ -1,137 +0,0 @@
-/*
- * linux/mm/slab.h
- * Written by Mark Hemment, 1996.
- * (markhe@xxxxxxxxxxxxxxxxx)
- */
-
-#ifndef _LINUX_SLAB_H
-#define        _LINUX_SLAB_H
-
-#if    defined(__KERNEL__)
-
-typedef struct kmem_cache_s kmem_cache_t;
-
-#include       <linux/config.h>        /* kmalloc_sizes.h needs CONFIG_ options */
-#include       <linux/gfp.h>
-#include       <linux/init.h>
-#include       <linux/types.h>
-#include       <asm/page.h>            /* kmalloc_sizes.h needs PAGE_SIZE */
-#include       <asm/cache.h>           /* kmalloc_sizes.h needs L1_CACHE_BYTES */
-
-/* flags for kmem_cache_alloc() */
-#define        SLAB_NOFS               GFP_NOFS
-#define        SLAB_NOIO               GFP_NOIO
-#define        SLAB_ATOMIC             GFP_ATOMIC
-#define        SLAB_USER               GFP_USER
-#define        SLAB_KERNEL             GFP_KERNEL
-#define        SLAB_DMA                GFP_DMA
-
-#define SLAB_LEVEL_MASK                GFP_LEVEL_MASK
-
-#define        SLAB_NO_GROW            __GFP_NO_GROW   /* don't grow a cache */
-
-/* flags to pass to kmem_cache_create().
- * The first 3 are only valid when the allocator as been build
- * SLAB_DEBUG_SUPPORT.
- */
-#define        SLAB_DEBUG_FREE         0x00000100UL    /* Peform (expensive) checks on free */
-#define        SLAB_DEBUG_INITIAL      0x00000200UL    /* Call constructor (as verifier) */
-#define        SLAB_RED_ZONE           0x00000400UL    /* Red zone objs in a cache */
-#define        SLAB_POISON             0x00000800UL    /* Poison objects */
-#define        SLAB_NO_REAP            0x00001000UL    /* never reap from the cache */
-#define        SLAB_HWCACHE_ALIGN      0x00002000UL    /* align objs on a h/w cache lines */
-#define SLAB_CACHE_DMA         0x00004000UL    /* use GFP_DMA memory */
-#define SLAB_MUST_HWCACHE_ALIGN        0x00008000UL    /* force alignment */
-#define SLAB_STORE_USER                0x00010000UL    /* store the last owner for bug hunting */
-#define SLAB_RECLAIM_ACCOUNT   0x00020000UL    /* track pages allocated to indicate
-                                                  what is reclaimable later*/
-#define SLAB_PANIC             0x00040000UL    /* panic if kmem_cache_create() fails */
-#define SLAB_DESTROY_BY_RCU    0x00080000UL    /* defer freeing pages to RCU */
-
-/* flags passed to a constructor func */
-#define        SLAB_CTOR_CONSTRUCTOR   0x001UL         /* if not set, then deconstructor */
-#define SLAB_CTOR_ATOMIC       0x002UL         /* tell constructor it can't sleep */
-#define        SLAB_CTOR_VERIFY        0x004UL         /* tell constructor it's a verify call */
-
-/* prototypes */
-extern void __init kmem_cache_init(void);
-
-extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
-                                      void (*)(void *, kmem_cache_t *, unsigned long),
-                                      void (*)(void *, kmem_cache_t *, unsigned long));
-extern int kmem_cache_destroy(kmem_cache_t *);
-extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
-extern void kmem_cache_free(kmem_cache_t *, void *);
-extern unsigned int kmem_cache_size(kmem_cache_t *);
-extern const char *kmem_cache_name(kmem_cache_t *);
-extern kmem_cache_t *kmem_find_general_cachep(size_t size, unsigned int __nocast gfpflags);
-
-/* Size description struct for general caches. */
-struct cache_sizes {
-       size_t           cs_size;
-       kmem_cache_t    *cs_cachep;
-       kmem_cache_t    *cs_dmacachep;
-};
-extern struct cache_sizes malloc_sizes[];
-extern void *__kmalloc(size_t, unsigned int __nocast);
-
-static inline void *kmalloc(size_t size, unsigned int __nocast flags)
-{
-       if (__builtin_constant_p(size)) {
-               int i = 0;
-#define CACHE(x) \
-               if (size <= x) \
-                       goto found; \
-               else \
-                       i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-               {
-                       extern void __you_cannot_kmalloc_that_much(void);
-                       __you_cannot_kmalloc_that_much();
-               }
-found:
-               return kmem_cache_alloc((flags & GFP_DMA) ?
-                       malloc_sizes[i].cs_dmacachep :
-                       malloc_sizes[i].cs_cachep, flags);
-       }
-       return __kmalloc(size, flags);
-}
-
-extern void *kcalloc(size_t, size_t, unsigned int __nocast);
-extern void kfree(const void *);
-extern unsigned int ksize(const void *);
-
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
-extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node);
-#else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
-{
-       return kmem_cache_alloc(cachep, flags);
-}
-static inline void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
-{
-       return kmalloc(size, flags);
-}
-#endif
-
-extern int FASTCALL(kmem_cache_reap(int));
-extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
-
-/* System wide caches */
-extern kmem_cache_t    *vm_area_cachep;
-extern kmem_cache_t    *names_cachep;
-extern kmem_cache_t    *files_cachep;
-extern kmem_cache_t    *filp_cachep;
-extern kmem_cache_t    *fs_cachep;
-extern kmem_cache_t    *signal_cachep;
-extern kmem_cache_t    *sighand_cachep;
-extern kmem_cache_t    *bio_cachep;
-
-extern atomic_t slab_reclaim_pages;
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_SLAB_H */
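
As a footnote on the cpumask.h removal above: callers now pick up Xen's own
<xen/cpumask.h> (see the domain.h hunk), which provides essentially the same
operations the deleted header documented. The following self-contained toy
sketch re-implements a few of them (cpu_set, cpu_isset, cpus_weight,
for_each_cpu_mask) over a single unsigned long, purely to illustrate the
semantics; the small NR_CPUS value and single-word mask are simplifications
made here, and this is not the Xen implementation.

/* Toy illustration of the cpumask semantics documented in the deleted
 * header.  NR_CPUS is given a small value and the mask is one unsigned
 * long; Xen's real <xen/cpumask.h> uses a full bitmap of NR_CPUS bits. */
#include <stdio.h>

#define NR_CPUS 8

typedef struct { unsigned long bits; } cpumask_t;

#define cpu_set(cpu, mask)    ((mask).bits |= 1UL << (cpu))
#define cpu_clear(cpu, mask)  ((mask).bits &= ~(1UL << (cpu)))
#define cpu_isset(cpu, mask)  (((mask).bits >> (cpu)) & 1UL)

static int cpus_weight(cpumask_t mask)       /* number of set bits */
{
    int cpu, w = 0;
    for (cpu = 0; cpu < NR_CPUS; cpu++)
        w += cpu_isset(cpu, mask);
    return w;
}

#define for_each_cpu_mask(cpu, mask)              \
    for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++)     \
        if (cpu_isset((cpu), (mask)))

int main(void)
{
    cpumask_t online = { 0 };
    int cpu;

    cpu_set(0, online);
    cpu_set(3, online);

    printf("online CPUs: %d\n", cpus_weight(online));
    for_each_cpu_mask(cpu, online)
        printf("cpu %d is online\n", cpu);
    return 0;
}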

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog