[Xen-devel] [PATCH] mini-os: fix bit ops comments and memory clobbers.
This fixes the comments on test_and_clear_bit, set_bit and clear_bit,
which are in fact not atomic, can be reordered, and are not memory
barriers.  It also drops the empty LOCK and LOCK_PREFIX macros, which
only add to the confusion, and adds the missing memory clobbers to
set_bit and clear_bit.

Signed-off-by: Samuel Thibault <samuel.thibault@xxxxxxxxxxxx>

diff --git a/extras/mini-os/include/x86/os.h b/extras/mini-os/include/x86/os.h
index f193865..510c632 100644
--- a/extras/mini-os/include/x86/os.h
+++ b/extras/mini-os/include/x86/os.h
@@ -152,8 +152,6 @@ do {                                            \
 
 #endif
 
-#define LOCK_PREFIX ""
-#define LOCK ""
 #define ADDR (*(volatile long *) addr)
 /*
  * Make sure gcc doesn't try to be clever and move things around
@@ -200,15 +198,13 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
  * @nr: Bit to clear
  * @addr: Address to count from
  *
- * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
- * It also implies a memory barrier.
+ * This operation is not atomic and can be reordered.
  */
 static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
 {
     int oldbit;
 
-    __asm__ __volatile__( LOCK
+    __asm__ __volatile__(
         "btrl %2,%1\n\tsbbl %0,%0"
         :"=r" (oldbit),"=m" (ADDR)
         :"Ir" (nr) : "memory");
@@ -241,22 +237,17 @@ static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
- * This function is atomic and may not be reordered.  See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writting portable code,
- * make sure not to rely on its reordering guarantees.
+ * This function is not atomic and may be reordered.
 *
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
 static inline void set_bit(int nr, volatile unsigned long * addr)
 {
-    __asm__ __volatile__( LOCK
+    __asm__ __volatile__(
         "btsl %1,%0"
         :"=m" (ADDR)
-        :"Ir" (nr));
+        :"Ir" (nr): "memory");
 }
 
 /**
@@ -271,10 +262,10 @@ static inline void set_bit(int nr, volatile unsigned long * addr)
  */
 static inline void clear_bit(int nr, volatile unsigned long * addr)
 {
-    __asm__ __volatile__( LOCK
+    __asm__ __volatile__(
         "btrl %1,%0"
         :"=m" (ADDR)
-        :"Ir" (nr));
+        :"Ir" (nr): "memory");
 }
 
 /**
@@ -347,14 +338,13 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
  * @nr: Bit to clear
  * @addr: Address to count from
  *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * This operation is not atomic and can be reordered.
  */
 static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
 {
     int oldbit;
 
-    __asm__ __volatile__( LOCK_PREFIX
+    __asm__ __volatile__(
         "btrl %2,%1\n\tsbbl %0,%0"
         :"=r" (oldbit),"=m" (ADDR)
         :"dIr" (nr) : "memory");
@@ -388,14 +378,14 @@ static __inline__ int variable_test_bit(int nr, volatile const void * addr)
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
- * This function is atomic and may not be reordered.  See __set_bit()
- * if you do not require the atomic guarantees.
+ * This function is not atomic and may be reordered.
+ *
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
 static __inline__ void set_bit(int nr, volatile void * addr)
 {
-    __asm__ __volatile__( LOCK_PREFIX
+    __asm__ __volatile__(
         "btsl %1,%0"
         :"=m" (ADDR)
         :"dIr" (nr) : "memory");
@@ -406,17 +396,14 @@ static __inline__ void set_bit(int nr, volatile void * addr)
  * @nr: Bit to clear
  * @addr: Address to start counting from
  *
- * clear_bit() is atomic and may not be reordered.  However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
+ * clear_bit() is not atomic and may be reordered.
  */
 static __inline__ void clear_bit(int nr, volatile void * addr)
 {
-    __asm__ __volatile__( LOCK_PREFIX
+    __asm__ __volatile__(
         "btrl %1,%0"
         :"=m" (ADDR)
-        :"dIr" (nr));
+        :"dIr" (nr): "memory");
 }
 
 /**
@@ -512,6 +499,8 @@ static inline unsigned long __synch_cmpxchg(volatile void *ptr,
     return old;
 }
 
+/* These are atomic and may not be reordered.  They also imply a full memory
+ * barrier. */
 static __inline__ void synch_set_bit(int nr, volatile void * addr)
 {
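As a side note on what the added "memory" clobbers buy, here is a
minimal sketch of the compiler-reordering hazard they close.  It is not
part of the patch; the producer/flag/data names are hypothetical, and
the set_bit() declaration from the os.h above is assumed in scope:

static unsigned long flag;
static int data;

void producer(void)
{
    data = 42;
    /* The asm in set_bit() declares only ADDR as an output.  Without a
     * "memory" clobber, gcc is free to move the store to `data` past
     * the btsl; with the clobber it keeps program order here.  This is
     * a compiler barrier only, not a CPU memory barrier. */
    set_bit(0, &flag);
}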
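And a sketch of when each variant is appropriate (again not part of the
patch; private_flags and shared_mask are hypothetical names):

static unsigned long private_flags;           /* touched by one CPU only */
extern volatile unsigned long shared_mask[];  /* touched concurrently */

void example(void)
{
    /* Fine: a plain btsl needs no lock prefix for CPU-private state. */
    set_bit(3, &private_flags);

    /* Racy: concurrent set_bit()/clear_bit() calls on shared_mask can
     * lose updates, since btsl without "lock" is not an atomic
     * read-modify-write:
     *     set_bit(3, shared_mask);
     */

    /* Correct for concurrent access: the synch_ variants use a lock
     * prefix and imply a full memory barrier. */
    synch_set_bit(3, shared_mask);
}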