
[Xen-changelog] [xen-unstable] [XEN] Include byteorder functions from Linux.



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1169035862 0
# Node ID fe76b80d081aef701299050e9e78575031290996
# Parent  8a397303fe09e7ae29fab703c416420f8902085e
[XEN] Include byteorder functions from Linux.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/include/asm-powerpc/byteorder.h       |   80 ++++++++++++
 xen/include/asm-x86/byteorder.h           |   36 +++++
 xen/include/xen/byteorder/big_endian.h    |  106 +++++++++++++++++
 xen/include/xen/byteorder/generic.h       |   68 +++++++++++
 xen/include/xen/byteorder/little_endian.h |  106 +++++++++++++++++
 xen/include/xen/byteorder/swab.h          |  185 ++++++++++++++++++++++++++++++
 xen/include/xen/config.h                  |    2 
 xen/include/xen/types.h                   |    7 +
 8 files changed, 590 insertions(+)
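
For orientation, a hedged usage sketch of the interface these headers add:
the cpu_to_leXX()/leXX_to_cpu() (and beXX) conversion helpers together with
the __leXX/__beXX marker types.  The function names below are invented for
illustration, and the include path assumes the usual <asm/byteorder.h>
wrapper:

    #include <asm/byteorder.h>  /* pulls in xen/byteorder/{little,big}_endian.h */

    /* Store a host-order value in a little-endian wire/disk slot and read
     * it back, independent of host endianness. */
    static void put_le32(__le32 *slot, __u32 host_val)
    {
        *slot = cpu_to_le32(host_val);  /* no-op on x86, byte-swap on powerpc */
    }

    static __u32 get_le32(const __le32 *slot)
    {
        return le32_to_cpu(*slot);
    }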

diff -r 8a397303fe09 -r fe76b80d081a xen/include/asm-powerpc/byteorder.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-powerpc/byteorder.h       Wed Jan 17 12:11:02 2007 +0000
@@ -0,0 +1,80 @@
+#ifndef _ASM_POWERPC_BYTEORDER_H
+#define _ASM_POWERPC_BYTEORDER_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/types.h>
+#include <xen/compiler.h>
+
+static inline __u16 ld_le16(const volatile __u16 *addr)
+{
+    __u16 val;
+
+    asm volatile ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+    return val;
+}
+
+static inline void st_le16(volatile __u16 *addr, const __u16 val)
+{
+    asm volatile ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static inline __u32 ld_le32(const volatile __u32 *addr)
+{
+    __u32 val;
+
+    asm volatile ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+    return val;
+}
+
+static inline void st_le32(volatile __u32 *addr, const __u32 val)
+{
+    asm volatile ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static inline __attribute_const__ __u16 ___arch__swab16(__u16 value)
+{
+    __u16 result;
+
+    asm("rlwimi %0,%1,8,16,23"
+        : "=r" (result)
+        : "r" (value), "0" (value >> 8));
+    return result;
+}
+
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 value)
+{
+    __u32 result;
+
+    asm("rlwimi %0,%1,24,16,23\n\t"
+        "rlwimi %0,%1,8,8,15\n\t"
+        "rlwimi %0,%1,24,0,7"
+        : "=r" (result)
+        : "r" (value), "0" (value >> 24));
+    return result;
+}
+
+#define __arch__swab16(x) ___arch__swab16(x)
+#define __arch__swab32(x) ___arch__swab32(x)
+
+/* The same, but returns the converted value from the location pointed to by addr. */
+#define __arch__swab16p(addr) ld_le16(addr)
+#define __arch__swab32p(addr) ld_le32(addr)
+
+/* The same, but do the conversion in situ, i.e. put the value back at addr. */
+#define __arch__swab16s(addr) st_le16(addr,*addr)
+#define __arch__swab32s(addr) st_le32(addr,*addr)
+
+#define __BYTEORDER_HAS_U64__
+#ifndef __powerpc64__
+#define __SWAB_64_THRU_32__
+#endif /* __powerpc64__ */
+
+#include <xen/byteorder/big_endian.h>
+
+#endif /* _ASM_POWERPC_BYTEORDER_H */
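
A minimal standalone sketch (plain <stdint.h> types, not part of the patch)
of what the lhbrx/lwbrx-based ld_le16()/ld_le32() helpers above compute: a
load followed by a byte reversal, which lets big-endian PowerPC read
little-endian data directly:

    #include <stdint.h>

    static uint16_t ld_le16_equiv(const volatile uint16_t *addr)
    {
        uint16_t v = *addr;                       /* plain load ...      */
        return (uint16_t)((v << 8) | (v >> 8));   /* ... then swap bytes */
    }

    static uint32_t ld_le32_equiv(const volatile uint32_t *addr)
    {
        uint32_t v = *addr;
        return (v << 24) | ((v & 0x0000ff00U) << 8) |
               ((v >> 8) & 0x0000ff00U) | (v >> 24);
    }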
diff -r 8a397303fe09 -r fe76b80d081a xen/include/asm-x86/byteorder.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/byteorder.h   Wed Jan 17 12:11:02 2007 +0000
@@ -0,0 +1,36 @@
+#ifndef __ASM_X86_BYTEORDER_H__
+#define __ASM_X86_BYTEORDER_H__
+
+#include <asm/types.h>
+#include <xen/compiler.h>
+
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+    asm("bswap %0" : "=r" (x) : "0" (x));
+    return x;
+}
+
+static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
+{ 
+    union { 
+        struct { __u32 a,b; } s;
+        __u64 u;
+    } v;
+    v.u = val;
+    asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" 
+        : "=r" (v.s.a), "=r" (v.s.b) 
+        : "0" (v.s.a), "1" (v.s.b)); 
+    return v.u;        
+} 
+
+/* Do not define swab16.  GCC is smart enough to recognize the "C" version
+   and convert it into a rotation or exchange.  */
+
+#define __arch__swab64(x) ___arch__swab64(x)
+#define __arch__swab32(x) ___arch__swab32(x)
+
+#define __BYTEORDER_HAS_U64__
+
+#include <xen/byteorder/little_endian.h>
+
+#endif /* __ASM_X86_BYTEORDER_H__ */
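
A standalone C sketch (illustration only, <stdint.h> types) of what the
bswapl/bswapl/xchgl sequence in ___arch__swab64() above computes: byte-swap
each 32-bit half, then exchange the halves:

    #include <stdint.h>

    static uint32_t swab32_c(uint32_t x)
    {
        return (x << 24) | ((x & 0x0000ff00U) << 8) |
               ((x >> 8) & 0x0000ff00U) | (x >> 24);
    }

    static uint64_t swab64_c(uint64_t x)
    {
        uint32_t lo = (uint32_t)x;          /* bytes 0..3 */
        uint32_t hi = (uint32_t)(x >> 32);  /* bytes 4..7 */
        return ((uint64_t)swab32_c(lo) << 32) | swab32_c(hi);
    }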
diff -r 8a397303fe09 -r fe76b80d081a xen/include/xen/byteorder/big_endian.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/byteorder/big_endian.h    Wed Jan 17 12:11:02 2007 +0000
@@ -0,0 +1,106 @@
+#ifndef __XEN_BYTEORDER_BIG_ENDIAN_H__
+#define __XEN_BYTEORDER_BIG_ENDIAN_H__
+
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#endif
+#ifndef __BIG_ENDIAN_BITFIELD
+#define __BIG_ENDIAN_BITFIELD
+#endif
+
+#include <xen/types.h>
+#include <xen/byteorder/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)(__u32)(x))
+#define __constant_ntohl(x) ((__force __u32)(__be32)(x))
+#define __constant_htons(x) ((__force __be16)(__u16)(x))
+#define __constant_ntohs(x) ((__force __u16)(__be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
+#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
+#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
+#define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __constant_be16_to_cpu(x) ((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)__swab64((x)))
+#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
+#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)__swab16((x)))
+#define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
+#define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
+#define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
+#define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+    return (__force __le64)__swab64p(p);
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+    return __swab64p((__u64 *)p);
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+    return (__force __le32)__swab32p(p);
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+    return __swab32p((__u32 *)p);
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+    return (__force __le16)__swab16p(p);
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+    return __swab16p((__u16 *)p);
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+    return (__force __be64)*p;
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+    return (__force __u64)*p;
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+    return (__force __be32)*p;
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+    return (__force __u32)*p;
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+    return (__force __be16)*p;
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+    return (__force __u16)*p;
+}
+#define __cpu_to_le64s(x) __swab64s((x))
+#define __le64_to_cpus(x) __swab64s((x))
+#define __cpu_to_le32s(x) __swab32s((x))
+#define __le32_to_cpus(x) __swab32s((x))
+#define __cpu_to_le16s(x) __swab16s((x))
+#define __le16_to_cpus(x) __swab16s((x))
+#define __cpu_to_be64s(x) do {} while (0)
+#define __be64_to_cpus(x) do {} while (0)
+#define __cpu_to_be32s(x) do {} while (0)
+#define __be32_to_cpus(x) do {} while (0)
+#define __cpu_to_be16s(x) do {} while (0)
+#define __be16_to_cpus(x) do {} while (0)
+
+#include <xen/byteorder/generic.h>
+
+#endif /* __XEN_BYTEORDER_BIG_ENDIAN_H__ */
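
A hedged illustration of the mapping this header establishes on a big-endian
build (the helper is hypothetical; the values in the comments assume a
big-endian host):

    static void big_endian_example(void)
    {
        __u32 host = 0x12345678;
        __be32 net  = cpu_to_be32(host);  /* identity: stays 0x12345678   */
        __le32 wire = cpu_to_le32(host);  /* swapped:  becomes 0x78563412 */

        cpu_to_be32s(&host);              /* expands to an empty statement */
        (void)net; (void)wire;
    }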
diff -r 8a397303fe09 -r fe76b80d081a xen/include/xen/byteorder/generic.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/byteorder/generic.h       Wed Jan 17 12:11:02 2007 +0000
@@ -0,0 +1,68 @@
+#ifndef __XEN_BYTEORDER_GENERIC_H__
+#define __XEN_BYTEORDER_GENERIC_H__
+
+/*
+ * Generic Byte-reordering support
+ *
+ * The "... p" macros, like le64_to_cpup, can be used with pointers
+ * to unaligned data, but there will be a performance penalty on 
+ * some architectures.  Use get_unaligned for unaligned data.
+ *
+ * The following macros are to be defined by <asm/byteorder.h>:
+ *
+ * Conversion of XX-bit integers (16-, 32- or 64-bit)
+ * between native CPU format and little/big endian format;
+ * the 64-bit variants are only defined for architectures that provide them.
+ *     cpu_to_[bl]eXX(__uXX x)
+ *     [bl]eXX_to_cpu(__uXX x)
+ *
+ * The same, but takes a pointer to the value to convert
+ *     cpu_to_[bl]eXXp(const __uXX *p)
+ *     [bl]eXX_to_cpup(const __[bl]eXX *p)
+ *
+ * The same, but converts in place
+ *     cpu_to_[bl]eXXs(__uXX *p)
+ *     [bl]eXX_to_cpus(__uXX *p)
+ *
+ * See asm-foo/byteorder.h for examples of how to provide
+ * architecture-optimized versions
+ */
+
+#define cpu_to_le64 __cpu_to_le64
+#define le64_to_cpu __le64_to_cpu
+#define cpu_to_le32 __cpu_to_le32
+#define le32_to_cpu __le32_to_cpu
+#define cpu_to_le16 __cpu_to_le16
+#define le16_to_cpu __le16_to_cpu
+#define cpu_to_be64 __cpu_to_be64
+#define be64_to_cpu __be64_to_cpu
+#define cpu_to_be32 __cpu_to_be32
+#define be32_to_cpu __be32_to_cpu
+#define cpu_to_be16 __cpu_to_be16
+#define be16_to_cpu __be16_to_cpu
+#define cpu_to_le64p __cpu_to_le64p
+#define le64_to_cpup __le64_to_cpup
+#define cpu_to_le32p __cpu_to_le32p
+#define le32_to_cpup __le32_to_cpup
+#define cpu_to_le16p __cpu_to_le16p
+#define le16_to_cpup __le16_to_cpup
+#define cpu_to_be64p __cpu_to_be64p
+#define be64_to_cpup __be64_to_cpup
+#define cpu_to_be32p __cpu_to_be32p
+#define be32_to_cpup __be32_to_cpup
+#define cpu_to_be16p __cpu_to_be16p
+#define be16_to_cpup __be16_to_cpup
+#define cpu_to_le64s __cpu_to_le64s
+#define le64_to_cpus __le64_to_cpus
+#define cpu_to_le32s __cpu_to_le32s
+#define le32_to_cpus __le32_to_cpus
+#define cpu_to_le16s __cpu_to_le16s
+#define le16_to_cpus __le16_to_cpus
+#define cpu_to_be64s __cpu_to_be64s
+#define be64_to_cpus __be64_to_cpus
+#define cpu_to_be32s __cpu_to_be32s
+#define be32_to_cpus __be32_to_cpus
+#define cpu_to_be16s __cpu_to_be16s
+#define be16_to_cpus __be16_to_cpus
+
+#endif /* __XEN_BYTEORDER_GENERIC_H__ */
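
A hedged usage sketch of the by-value, by-pointer ("p") and in-place ("s")
variants listed above; the wire_hdr structure and its fields are made up for
illustration:

    struct wire_hdr {
        __le16 len;
        __le32 magic;
    };

    static void parse_hdr(struct wire_hdr *h, __u16 *len, __u32 *magic)
    {
        *len   = le16_to_cpup(&h->len);   /* convert via pointer  */
        *magic = le32_to_cpu(h->magic);   /* convert by value     */
        le32_to_cpus(&h->magic);          /* or convert in place  */
    }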
diff -r 8a397303fe09 -r fe76b80d081a xen/include/xen/byteorder/little_endian.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/byteorder/little_endian.h Wed Jan 17 12:11:02 2007 +0000
@@ -0,0 +1,106 @@
+#ifndef __XEN_BYTEORDER_LITTLE_ENDIAN_H__
+#define __XEN_BYTEORDER_LITTLE_ENDIAN_H__
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN 1234
+#endif
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#define __LITTLE_ENDIAN_BITFIELD
+#endif
+
+#include <xen/types.h>
+#include <xen/byteorder/swab.h>
+
+#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
+#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
+#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
+#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
+#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
+#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
+#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
+#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
+#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
+#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
+#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
+#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
+#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
+#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
+#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+    return (__force __le64)*p;
+}
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+    return (__force __u64)*p;
+}
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+    return (__force __le32)*p;
+}
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+    return (__force __u32)*p;
+}
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+    return (__force __le16)*p;
+}
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+    return (__force __u16)*p;
+}
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+    return (__force __be64)__swab64p(p);
+}
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+    return __swab64p((__u64 *)p);
+}
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+    return (__force __be32)__swab32p(p);
+}
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+    return __swab32p((__u32 *)p);
+}
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+    return (__force __be16)__swab16p(p);
+}
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+    return __swab16p((__u16 *)p);
+}
+#define __cpu_to_le64s(x) do {} while (0)
+#define __le64_to_cpus(x) do {} while (0)
+#define __cpu_to_le32s(x) do {} while (0)
+#define __le32_to_cpus(x) do {} while (0)
+#define __cpu_to_le16s(x) do {} while (0)
+#define __le16_to_cpus(x) do {} while (0)
+#define __cpu_to_be64s(x) __swab64s((x))
+#define __be64_to_cpus(x) __swab64s((x))
+#define __cpu_to_be32s(x) __swab32s((x))
+#define __be32_to_cpus(x) __swab32s((x))
+#define __cpu_to_be16s(x) __swab16s((x))
+#define __be16_to_cpus(x) __swab16s((x))
+
+#include <xen/byteorder/generic.h>
+
+#endif /* __XEN_BYTEORDER_LITTLE_ENDIAN_H__ */
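
The __constant_* forms above are plain constant expressions, so they can be
used where the ({ ... }) statement-expression forms cannot, e.g. in static
initializers.  A hedged example (the constants are illustrative only):

    static const __be16 proto_ip = __constant_htons(0x0800);
    static const __be32 magic_be = __constant_cpu_to_be32(0xfeedface);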
diff -r 8a397303fe09 -r fe76b80d081a xen/include/xen/byteorder/swab.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/byteorder/swab.h  Wed Jan 17 12:11:02 2007 +0000
@@ -0,0 +1,185 @@
+#ifndef __XEN_BYTEORDER_SWAB_H__
+#define __XEN_BYTEORDER_SWAB_H__
+
+/*
+ * Byte-swapping, independently of CPU endianness
+ *     swabXX[ps]?(foo)
+ *
+ * Francois-Rene Rideau <fare@xxxxxxxxx> 19971205
+ *    separated swab functions from cpu_to_XX,
+ *    to clean up support for bizarre-endian architectures.
+ */
+
+#include <xen/compiler.h>
+
+/* Casts are necessary for constants, because we never know for sure how
+ * U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
+ */
+#define ___swab16(x)                                    \
+({                                                      \
+    __u16 __x = (x);                                    \
+    ((__u16)(                                           \
+        (((__u16)(__x) & (__u16)0x00ffU) << 8) |        \
+        (((__u16)(__x) & (__u16)0xff00U) >> 8) ));      \
+})
+
+#define ___swab32(x)                                            \
+({                                                              \
+    __u32 __x = (x);                                            \
+    ((__u32)(                                                   \
+        (((__u32)(__x) & (__u32)0x000000ffUL) << 24) |          \
+        (((__u32)(__x) & (__u32)0x0000ff00UL) <<  8) |          \
+        (((__u32)(__x) & (__u32)0x00ff0000UL) >>  8) |          \
+        (((__u32)(__x) & (__u32)0xff000000UL) >> 24) ));        \
+})
+
+#define ___swab64(x)                                                       \
+({                                                                         \
+    __u64 __x = (x);                                                       \
+    ((__u64)(                                                              \
+        (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) |     \
+        (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) |     \
+        (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) |     \
+        (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) <<  8) |     \
+        (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >>  8) |     \
+        (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) |     \
+        (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) |     \
+        (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) ));   \
+})
+
+#define ___constant_swab16(x)                   \
+    ((__u16)(                                   \
+        (((__u16)(x) & (__u16)0x00ffU) << 8) |  \
+        (((__u16)(x) & (__u16)0xff00U) >> 8) ))
+#define ___constant_swab32(x)                           \
+    ((__u32)(                                           \
+        (((__u32)(x) & (__u32)0x000000ffUL) << 24) |    \
+        (((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |    \
+        (((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |    \
+        (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
+#define ___constant_swab64(x)                                            \
+    ((__u64)(                                                            \
+        (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) |     \
+        (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) |     \
+        (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) |     \
+        (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) |     \
+        (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) |     \
+        (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) |     \
+        (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) |     \
+        (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
+
+/*
+ * provide defaults when no architecture-specific optimization is detected
+ */
+#ifndef __arch__swab16
+#  define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
+#endif
+#ifndef __arch__swab32
+#  define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
+#endif
+#ifndef __arch__swab64
+#  define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
+#endif
+
+#ifndef __arch__swab16p
+#  define __arch__swab16p(x) __arch__swab16(*(x))
+#endif
+#ifndef __arch__swab32p
+#  define __arch__swab32p(x) __arch__swab32(*(x))
+#endif
+#ifndef __arch__swab64p
+#  define __arch__swab64p(x) __arch__swab64(*(x))
+#endif
+
+#ifndef __arch__swab16s
+#  define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
+#endif
+#ifndef __arch__swab32s
+#  define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
+#endif
+#ifndef __arch__swab64s
+#  define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
+#endif
+
+
+/*
+ * Allow constant folding
+ */
+#if defined(__GNUC__) && defined(__OPTIMIZE__)
+#  define __swab16(x) \
+(__builtin_constant_p((__u16)(x)) ? \
+ ___swab16((x)) : \
+ __fswab16((x)))
+#  define __swab32(x) \
+(__builtin_constant_p((__u32)(x)) ? \
+ ___swab32((x)) : \
+ __fswab32((x)))
+#  define __swab64(x) \
+(__builtin_constant_p((__u64)(x)) ? \
+ ___swab64((x)) : \
+ __fswab64((x)))
+#else
+#  define __swab16(x) __fswab16(x)
+#  define __swab32(x) __fswab32(x)
+#  define __swab64(x) __fswab64(x)
+#endif /* OPTIMIZE */
+
+
+static inline __attribute_const__ __u16 __fswab16(__u16 x)
+{
+    return __arch__swab16(x);
+}
+static inline __u16 __swab16p(const __u16 *x)
+{
+    return __arch__swab16p(x);
+}
+static inline void __swab16s(__u16 *addr)
+{
+    __arch__swab16s(addr);
+}
+
+static inline __attribute_const__ __u32 __fswab32(__u32 x)
+{
+    return __arch__swab32(x);
+}
+static inline __u32 __swab32p(const __u32 *x)
+{
+    return __arch__swab32p(x);
+}
+static inline void __swab32s(__u32 *addr)
+{
+    __arch__swab32s(addr);
+}
+
+#ifdef __BYTEORDER_HAS_U64__
+static inline __attribute_const__ __u64 __fswab64(__u64 x)
+{
+#  ifdef __SWAB_64_THRU_32__
+    __u32 h = x >> 32;
+    __u32 l = x & ((1ULL<<32)-1);
+    return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
+#  else
+    return __arch__swab64(x);
+#  endif
+}
+static inline __u64 __swab64p(const __u64 *x)
+{
+    return __arch__swab64p(x);
+}
+static inline void __swab64s(__u64 *addr)
+{
+    __arch__swab64s(addr);
+}
+#endif /* __BYTEORDER_HAS_U64__ */
+
+#define swab16 __swab16
+#define swab32 __swab32
+#define swab64 __swab64
+#define swab16p __swab16p
+#define swab32p __swab32p
+#define swab64p __swab64p
+#define swab16s __swab16s
+#define swab32s __swab32s
+#define swab64s __swab64s
+
+#endif /* __XEN_BYTEORDER_SWAB_H__ */
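
A minimal standalone sketch (GCC assumed, names invented) of the
constant-folding dispatch used above: a literal argument is folded at
compile time through the pure-C form, while a runtime value goes through an
inline function that an architecture may override:

    #include <stdint.h>

    static inline uint16_t my_fswab16(uint16_t x)        /* runtime path */
    {
        return (uint16_t)((x << 8) | (x >> 8));
    }

    #define MY_CONSTANT_SWAB16(x)                        \
        ((uint16_t)((((uint16_t)(x) & 0x00ffU) << 8) |   \
                    (((uint16_t)(x) & 0xff00U) >> 8)))

    #define my_swab16(x)                                 \
        (__builtin_constant_p((uint16_t)(x)) ?           \
         MY_CONSTANT_SWAB16(x) : my_fswab16(x))

    /* my_swab16(0x1234) folds to 0x3412 at compile time;
     * my_swab16(runtime_var) calls my_fswab16(). */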
diff -r 8a397303fe09 -r fe76b80d081a xen/include/xen/config.h
--- a/xen/include/xen/config.h  Wed Jan 17 10:33:13 2007 +0000
+++ b/xen/include/xen/config.h  Wed Jan 17 12:11:02 2007 +0000
@@ -63,6 +63,8 @@
 /* Linux 'checker' project. */
 #define __iomem
 #define __user
+#define __force
+#define __bitwise
 
 #ifndef __ASSEMBLY__
 
diff -r 8a397303fe09 -r fe76b80d081a xen/include/xen/types.h
--- a/xen/include/xen/types.h   Wed Jan 17 10:33:13 2007 +0000
+++ b/xen/include/xen/types.h   Wed Jan 17 12:11:02 2007 +0000
@@ -51,4 +51,11 @@ struct domain;
 struct domain;
 struct vcpu;
 
+typedef __u16 __le16;
+typedef __u16 __be16;
+typedef __u32 __le32;
+typedef __u32 __be32;
+typedef __u64 __le64;
+typedef __u64 __be64;
+
 #endif /* __TYPES_H__ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog