
[Xen-changelog] [xen-unstable] [IA64] move Xen's atomic.h from linux to linux-xen so it can be modified.



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID fa4281cb7a5b27790d4ff0d2692f9055378b1694
# Parent  622bb65e2011c5de5d0c0718df8b3328a3c783e2
[IA64] move Xen's atomic.h from linux to linux-xen so it can be modified.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 xen/include/asm-ia64/linux/asm/atomic.h          |  183 -----------------------
 xen/include/asm-ia64/linux-xen/asm/README.origin |    1 +
 xen/include/asm-ia64/linux-xen/asm/atomic.h      |  183 +++++++++++++++++++++++
 xen/include/asm-ia64/linux/asm/README.origin     |    1 -
 4 files changed, 184 insertions(+), 184 deletions(-)

diff -r 622bb65e2011 -r fa4281cb7a5b xen/include/asm-ia64/linux-xen/asm/README.origin
--- a/xen/include/asm-ia64/linux-xen/asm/README.origin  Sun Oct 29 11:18:17 2006 -0700
+++ b/xen/include/asm-ia64/linux-xen/asm/README.origin  Tue Oct 31 22:25:12 2006 -0700
@@ -7,6 +7,7 @@
 
 acpi.h                 -> linux/include/asm-ia64/acpi.h
 asmmacro.h             -> linux/include/asm-ia64/asmmacro.h
+atomic.h               -> linux/include/asm-ia64/atomic.h
 cache.h                        -> linux/include/asm-ia64/cache.h
 gcc_intrin.h           -> linux/include/asm-ia64/gcc_intrin.h
 ia64regs.h             -> linux/include/asm-ia64/ia64regs.h
diff -r 622bb65e2011 -r fa4281cb7a5b xen/include/asm-ia64/linux/asm/README.origin
--- a/xen/include/asm-ia64/linux/asm/README.origin      Sun Oct 29 11:18:17 2006 -0700
+++ b/xen/include/asm-ia64/linux/asm/README.origin      Tue Oct 31 22:25:12 2006 -0700
@@ -4,7 +4,6 @@
 # needs to be changed, move it to ../linux-xen and follow
 # the instructions in the README there.
 
-atomic.h               -> linux/include/asm-ia64/atomic.h
 bitops.h               -> linux/include/asm-ia64/bitops.h
 break.h                        -> linux/include/asm-ia64/break.h
 byteorder.h            -> linux/include/asm-ia64/byteorder.h
diff -r 622bb65e2011 -r fa4281cb7a5b xen/include/asm-ia64/linux-xen/asm/atomic.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/atomic.h       Tue Oct 31 22:25:12 2006 -0700
@@ -0,0 +1,183 @@
+#ifndef _ASM_IA64_ATOMIC_H
+#define _ASM_IA64_ATOMIC_H
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ *
+ * NOTE: don't mess with the types below!  The "unsigned long" and
+ * "int" types were carefully placed so as to ensure proper operation
+ * of the macros.
+ *
+ * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+#include <linux/types.h>
+
+#include <asm/intrinsics.h>
+
+/*
+ * On IA-64, counter must always be volatile to ensure that the
+ * memory accesses are ordered.
+ */
+typedef struct { volatile __s32 counter; } atomic_t;
+typedef struct { volatile __s64 counter; } atomic64_t;
+
+#define ATOMIC_INIT(i)         ((atomic_t) { (i) })
+#define ATOMIC64_INIT(i)       ((atomic64_t) { (i) })
+
+#define atomic_read(v)         ((v)->counter)
+#define atomic64_read(v)       ((v)->counter)
+
+#define atomic_set(v,i)                (((v)->counter) = (i))
+#define atomic64_set(v,i)      (((v)->counter) = (i))
+
+static __inline__ int
+ia64_atomic_add (int i, atomic_t *v)
+{
+       __s32 old, new;
+       CMPXCHG_BUGCHECK_DECL
+
+       do {
+               CMPXCHG_BUGCHECK(v);
+               old = atomic_read(v);
+               new = old + i;
+       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
+       return new;
+}
+
+static __inline__ int
+ia64_atomic64_add (__s64 i, atomic64_t *v)
+{
+       __s64 old, new;
+       CMPXCHG_BUGCHECK_DECL
+
+       do {
+               CMPXCHG_BUGCHECK(v);
+               old = atomic_read(v);
+               new = old + i;
+       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
+       return new;
+}
+
+static __inline__ int
+ia64_atomic_sub (int i, atomic_t *v)
+{
+       __s32 old, new;
+       CMPXCHG_BUGCHECK_DECL
+
+       do {
+               CMPXCHG_BUGCHECK(v);
+               old = atomic_read(v);
+               new = old - i;
+       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
+       return new;
+}
+
+static __inline__ int
+ia64_atomic64_sub (__s64 i, atomic64_t *v)
+{
+       __s64 old, new;
+       CMPXCHG_BUGCHECK_DECL
+
+       do {
+               CMPXCHG_BUGCHECK(v);
+               old = atomic_read(v);
+               new = old - i;
+       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
+       return new;
+}
+
+#define atomic_add_return(i,v)                                         \
+({                                                                     \
+       int __ia64_aar_i = (i);                                         \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
+            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
+            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
+            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
+               : ia64_atomic_add(__ia64_aar_i, v);                     \
+})
+
+#define atomic64_add_return(i,v)                                       \
+({                                                                     \
+       long __ia64_aar_i = (i);                                        \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
+            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
+            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
+            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
+               : ia64_atomic64_add(__ia64_aar_i, v);                   \
+})
+
+/*
+ * Atomically add I to V and return TRUE if the resulting value is
+ * negative.
+ */
+static __inline__ int
+atomic_add_negative (int i, atomic_t *v)
+{
+       return atomic_add_return(i, v) < 0;
+}
+
+static __inline__ int
+atomic64_add_negative (__s64 i, atomic64_t *v)
+{
+       return atomic64_add_return(i, v) < 0;
+}
+
+#define atomic_sub_return(i,v)                                         \
+({                                                                     \
+       int __ia64_asr_i = (i);                                         \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
+            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
+            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
+            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
+               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
+               : ia64_atomic_sub(__ia64_asr_i, v);                     \
+})
+
+#define atomic64_sub_return(i,v)                                       \
+({                                                                     \
+       long __ia64_asr_i = (i);                                        \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
+            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
+            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
+            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
+               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
+               : ia64_atomic64_sub(__ia64_asr_i, v);                   \
+})
+
+#define atomic_dec_return(v)           atomic_sub_return(1, (v))
+#define atomic_inc_return(v)           atomic_add_return(1, (v))
+#define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
+#define atomic64_inc_return(v)         atomic64_add_return(1, (v))
+
+#define atomic_sub_and_test(i,v)       (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)         (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v)         (atomic_add_return(1, (v)) == 0)
+#define atomic64_sub_and_test(i,v)     (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v)       (atomic64_sub_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v)       (atomic64_add_return(1, (v)) == 0)
+
+#define atomic_add(i,v)                        atomic_add_return((i), (v))
+#define atomic_sub(i,v)                        atomic_sub_return((i), (v))
+#define atomic_inc(v)                  atomic_add(1, (v))
+#define atomic_dec(v)                  atomic_sub(1, (v))
+
+#define atomic64_add(i,v)              atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)              atomic64_sub_return((i), (v))
+#define atomic64_inc(v)                        atomic64_add(1, (v))
+#define atomic64_dec(v)                        atomic64_sub(1, (v))
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec()    barrier()
+#define smp_mb__after_atomic_dec()     barrier()
+#define smp_mb__before_atomic_inc()    barrier()
+#define smp_mb__after_atomic_inc()     barrier()
+
+#endif /* _ASM_IA64_ATOMIC_H */
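
For reference, ia64_atomic_add() and its three siblings above are the classic compare-and-swap retry loop, and the atomic_add_return()/atomic_sub_return() macros special-case constant deltas of +/-1, +/-4, +/-8 and +/-16 (the only immediates the IA-64 fetchadd instruction encodes), falling back to that loop for everything else. Below is a minimal user-space sketch of the same retry pattern, assuming GCC's __sync_val_compare_and_swap() builtin as a stand-in for ia64_cmpxchg(); the demo_* names are illustrative and not part of the patch.

#include <stdio.h>

/* Stand-in for the patch's atomic_t. */
typedef struct { volatile int counter; } demo_atomic_t;

/* Same shape as ia64_atomic_add(): snapshot the counter, compute the
 * new value, then publish it with a compare-and-swap; if another
 * thread changed the counter in between, the builtin returns a value
 * different from our snapshot and we retry. */
static int demo_atomic_add(int i, demo_atomic_t *v)
{
	int old, new;

	do {
		old = v->counter;
		new = old + i;
	} while (__sync_val_compare_and_swap(&v->counter, old, new) != old);
	return new;
}

int main(void)
{
	demo_atomic_t v = { 0 };
	printf("%d\n", demo_atomic_add(5, &v));   /* prints 5 */
	printf("%d\n", demo_atomic_add(-2, &v));  /* prints 3 */
	return 0;
}
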
diff -r 622bb65e2011 -r fa4281cb7a5b xen/include/asm-ia64/linux/asm/atomic.h
--- a/xen/include/asm-ia64/linux/asm/atomic.h   Sun Oct 29 11:18:17 2006 -0700
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,183 +0,0 @@
-#ifndef _ASM_IA64_ATOMIC_H
-#define _ASM_IA64_ATOMIC_H
-
-/*
- * Atomic operations that C can't guarantee us.  Useful for
- * resource counting etc..
- *
- * NOTE: don't mess with the types below!  The "unsigned long" and
- * "int" types were carefully placed so as to ensure proper operation
- * of the macros.
- *
- * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- */
-#include <linux/types.h>
-
-#include <asm/intrinsics.h>
-
-/*
- * On IA-64, counter must always be volatile to ensure that that the
- * memory accesses are ordered.
- */
-typedef struct { volatile __s32 counter; } atomic_t;
-typedef struct { volatile __s64 counter; } atomic64_t;
-
-#define ATOMIC_INIT(i)         ((atomic_t) { (i) })
-#define ATOMIC64_INIT(i)       ((atomic64_t) { (i) })
-
-#define atomic_read(v)         ((v)->counter)
-#define atomic64_read(v)       ((v)->counter)
-
-#define atomic_set(v,i)                (((v)->counter) = (i))
-#define atomic64_set(v,i)      (((v)->counter) = (i))
-
-static __inline__ int
-ia64_atomic_add (int i, atomic_t *v)
-{
-       __s32 old, new;
-       CMPXCHG_BUGCHECK_DECL
-
-       do {
-               CMPXCHG_BUGCHECK(v);
-               old = atomic_read(v);
-               new = old + i;
-       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-       return new;
-}
-
-static __inline__ int
-ia64_atomic64_add (__s64 i, atomic64_t *v)
-{
-       __s64 old, new;
-       CMPXCHG_BUGCHECK_DECL
-
-       do {
-               CMPXCHG_BUGCHECK(v);
-               old = atomic_read(v);
-               new = old + i;
-       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-       return new;
-}
-
-static __inline__ int
-ia64_atomic_sub (int i, atomic_t *v)
-{
-       __s32 old, new;
-       CMPXCHG_BUGCHECK_DECL
-
-       do {
-               CMPXCHG_BUGCHECK(v);
-               old = atomic_read(v);
-               new = old - i;
-       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-       return new;
-}
-
-static __inline__ int
-ia64_atomic64_sub (__s64 i, atomic64_t *v)
-{
-       __s64 old, new;
-       CMPXCHG_BUGCHECK_DECL
-
-       do {
-               CMPXCHG_BUGCHECK(v);
-               old = atomic_read(v);
-               new = old - i;
-       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-       return new;
-}
-
-#define atomic_add_return(i,v)                                         \
-({                                                                     \
-       int __ia64_aar_i = (i);                                         \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
-            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
-            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
-            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
-               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
-               : ia64_atomic_add(__ia64_aar_i, v);                     \
-})
-
-#define atomic64_add_return(i,v)                                       \
-({                                                                     \
-       long __ia64_aar_i = (i);                                        \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
-            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
-            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
-            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
-               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
-               : ia64_atomic64_add(__ia64_aar_i, v);                   \
-})
-
-/*
- * Atomically add I to V and return TRUE if the resulting value is
- * negative.
- */
-static __inline__ int
-atomic_add_negative (int i, atomic_t *v)
-{
-       return atomic_add_return(i, v) < 0;
-}
-
-static __inline__ int
-atomic64_add_negative (__s64 i, atomic64_t *v)
-{
-       return atomic64_add_return(i, v) < 0;
-}
-
-#define atomic_sub_return(i,v)                                         \
-({                                                                     \
-       int __ia64_asr_i = (i);                                         \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
-            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
-            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
-            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
-               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
-               : ia64_atomic_sub(__ia64_asr_i, v);                     \
-})
-
-#define atomic64_sub_return(i,v)                                       \
-({                                                                     \
-       long __ia64_asr_i = (i);                                        \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
-            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
-            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
-            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
-               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
-               : ia64_atomic64_sub(__ia64_asr_i, v);                   \
-})
-
-#define atomic_dec_return(v)           atomic_sub_return(1, (v))
-#define atomic_inc_return(v)           atomic_add_return(1, (v))
-#define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v)         atomic64_add_return(1, (v))
-
-#define atomic_sub_and_test(i,v)       (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v)         (atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v)         (atomic_add_return(1, (v)) == 0)
-#define atomic64_sub_and_test(i,v)     (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_dec_and_test(v)       (atomic64_sub_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v)       (atomic64_add_return(1, (v)) == 0)
-
-#define atomic_add(i,v)                        atomic_add_return((i), (v))
-#define atomic_sub(i,v)                        atomic_sub_return((i), (v))
-#define atomic_inc(v)                  atomic_add(1, (v))
-#define atomic_dec(v)                  atomic_sub(1, (v))
-
-#define atomic64_add(i,v)              atomic64_add_return((i), (v))
-#define atomic64_sub(i,v)              atomic64_sub_return((i), (v))
-#define atomic64_inc(v)                        atomic64_add(1, (v))
-#define atomic64_dec(v)                        atomic64_sub(1, (v))
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()    barrier()
-#define smp_mb__after_atomic_dec()     barrier()
-#define smp_mb__before_atomic_inc()    barrier()
-#define smp_mb__after_atomic_inc()     barrier()
-
-#endif /* _ASM_IA64_ATOMIC_H */
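
As context for how this header gets used, here is a hedged sketch of the usual reference-counting idiom built on atomic_set()/atomic_inc()/atomic_dec_and_test(). struct object and free_object() are hypothetical names for illustration only, not part of this changeset; atomic_dec_and_test() returns true only for the caller that drives the count to zero, so exactly one thread runs the teardown.

#include <asm/atomic.h>

/* Hypothetical refcounted object; not part of this changeset. */
struct object {
	atomic_t refcount;
	/* ... payload ... */
};

extern void free_object(struct object *obj);	/* hypothetical teardown */

static void object_init(struct object *obj)
{
	atomic_set(&obj->refcount, 1);	/* creator holds one reference */
}

static void object_get(struct object *obj)
{
	atomic_inc(&obj->refcount);
}

static void object_put(struct object *obj)
{
	/* True only for the thread that took the count to zero. */
	if (atomic_dec_and_test(&obj->refcount))
		free_object(obj);
}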
