[Xen-changelog] [xen-unstable] x86: Clean up atomic.h comments and asm specifiers.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1238496440 -3600
# Node ID cd6b3af1919103f48a173d7250faea0bad2a8d60
# Parent 80ecfc3d6a8efb824729657fa9a55e7f0a9b447b
x86: Clean up atomic.h comments and asm specifiers.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/include/asm-x86/atomic.h |   46 ++++++++++++++++++----------------------------
 1 files changed, 18 insertions(+), 28 deletions(-)

diff -r 80ecfc3d6a8e -r cd6b3af19191 xen/include/asm-x86/atomic.h
--- a/xen/include/asm-x86/atomic.h      Tue Mar 31 11:41:13 2009 +0100
+++ b/xen/include/asm-x86/atomic.h      Tue Mar 31 11:47:20 2009 +0100
@@ -23,8 +23,7 @@ typedef struct { int counter; } atomic_t
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
  */
 #define _atomic_read(v)  ((v).counter)
 #define atomic_read(v)   (*(volatile int *)&((v)->counter))
@@ -34,8 +33,7 @@ typedef struct { int counter; } atomic_t
  * @v: pointer of type atomic_t
  * @i: required value
  *
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
  */
 #define _atomic_set(v,i)  (((v).counter) = (i))
 #define atomic_set(v,i)   (*(volatile int *)&((v)->counter) = (i))
@@ -45,12 +43,11 @@ typedef struct { int counter; } atomic_t
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
- * Atomically adds @i to @v. Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
  */
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
-    __asm__ __volatile__(
+    asm volatile(
         LOCK "addl %1,%0"
         :"=m" (*(volatile int *)&v->counter)
         :"ir" (i), "m" (*(volatile int *)&v->counter));
@@ -61,12 +58,11 @@ static __inline__ void atomic_add(int i,
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
  */
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
-    __asm__ __volatile__(
+    asm volatile(
         LOCK "subl %1,%0"
         :"=m" (*(volatile int *)&v->counter)
         :"ir" (i), "m" (*(volatile int *)&v->counter));
@@ -79,14 +75,13 @@ static __inline__ void atomic_sub(int i,
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 {
     unsigned char c;
 
-    __asm__ __volatile__(
+    asm volatile(
         LOCK "subl %2,%0; sete %1"
         :"=m" (*(volatile int *)&v->counter), "=qm" (c)
         :"ir" (i), "m" (*(volatile int *)&v->counter) : "memory");
@@ -97,12 +92,11 @@ static __inline__ int atomic_sub_and_tes
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
  */
 static __inline__ void atomic_inc(atomic_t *v)
 {
-    __asm__ __volatile__(
+    asm volatile(
         LOCK "incl %0"
         :"=m" (*(volatile int *)&v->counter)
         :"m" (*(volatile int *)&v->counter));
@@ -112,12 +106,11 @@ static __inline__ void atomic_inc(atomic
  * atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
  *
- * Atomically decrements @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
  */
 static __inline__ void atomic_dec(atomic_t *v)
 {
-    __asm__ __volatile__(
+    asm volatile(
         LOCK "decl %0"
         :"=m" (*(volatile int *)&v->counter)
         :"m" (*(volatile int *)&v->counter));
@@ -129,14 +122,13 @@ static __inline__ void atomic_dec(atomic
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
- * cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
  */
 static __inline__ int atomic_dec_and_test(atomic_t *v)
 {
     unsigned char c;
 
-    __asm__ __volatile__(
+    asm volatile(
         LOCK "decl %0; sete %1"
         :"=m" (*(volatile int *)&v->counter), "=qm" (c)
         :"m" (*(volatile int *)&v->counter) : "memory");
@@ -149,14 +141,13 @@ static __inline__ int atomic_dec_and_tes
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
  */
 static __inline__ int atomic_inc_and_test(atomic_t *v)
 {
     unsigned char c;
 
-    __asm__ __volatile__(
+    asm volatile(
         LOCK "incl %0; sete %1"
         :"=m" (*(volatile int *)&v->counter), "=qm" (c)
         :"m" (*(volatile int *)&v->counter) : "memory");
@@ -170,14 +161,13 @@ static __inline__ int atomic_inc_and_tes
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
- * result is greater than or equal to zero. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
  */
 static __inline__ int atomic_add_negative(int i, atomic_t *v)
 {
     unsigned char c;
 
-    __asm__ __volatile__(
+    asm volatile(
         LOCK "addl %2,%0; sets %1"
         :"=m" (*(volatile int *)&v->counter), "=qm" (c)
         :"ir" (i), "m" (*(volatile int *)&v->counter) : "memory");
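
For reference, the two asm spellings this patch switches between are equivalent in GCC: "__asm__" and "__volatile__" are the reserved alternate keywords that stay available even under -ansi/-std=c89, while the plain "asm volatile" form adopted here relies on the GNU C dialect that GCC uses by default and that Xen is built with. The deleted "24 bits" caveat was inherited from Linux, where one port (sparc32) historically embedded a lock byte in the atomic_t counter word; it never applied to x86, where the full 32-bit range of the counter is usable.

Below is a minimal standalone sketch of the resulting style, using the same operand constraints as the patched atomic_add() above. The my_atomic_add() wrapper, the test harness, and the literal "lock" prefix (standing in for Xen's LOCK macro) are illustrative assumptions, not Xen code; it should build with GCC on x86.

/* Standalone illustration (not Xen code) of the "asm volatile"
 * style adopted by this patch, assuming GCC on x86.  The literal
 * "lock; " prefix stands in for Xen's LOCK macro. */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

static inline void my_atomic_add(int i, atomic_t *v)
{
    /* Same constraints as the patched atomic_add(): %0 is the
     * memory destination, %1 the register-or-immediate addend. */
    asm volatile(
        "lock; addl %1,%0"
        :"=m" (*(volatile int *)&v->counter)
        :"ir" (i), "m" (*(volatile int *)&v->counter));
}

int main(void)
{
    atomic_t a = { 40 };
    my_atomic_add(2, &a);
    printf("counter = %d\n", a.counter);  /* prints: counter = 42 */
    return 0;
}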