[RFC v2 7/7] powerpc: atomic: Make atomic{,64}_cmpxchg and cmpxchg a full barrier

From: Boqun Feng
Date: Wed Sep 16 2015 - 11:50:54 EST


According to memory-barriers.txt, cmpxchg() and its atomic{,64}_
versions must imply a full memory barrier. However, on powerpc they are
currently implemented with only RELEASE+ACQUIRE ordering, which is not
a full barrier.
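
To make the requirement concrete, here is a hypothetical
store-buffering example (not part of the patch; a, b, x, r0 and r1 are
made up for illustration). A fully ordered cmpxchg() forbids the
outcome r0 == 0 && r1 == 0, whereas lwsync/isync around the ll/sc loop
does not order the earlier store against the later load, so that
outcome remains possible:

	int a, b, x;	/* all initially 0, so the cmpxchg() below succeeds */

	void cpu0(void)
	{
		int r0;

		WRITE_ONCE(a, 1);
		cmpxchg(&x, 0, 1);	/* should act as smp_mb() before and after */
		r0 = READ_ONCE(b);
	}

	void cpu1(void)
	{
		int r1;

		WRITE_ONCE(b, 1);
		smp_mb();
		r1 = READ_ONCE(a);
	}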

Therefore, replace PPC_RELEASE_BARRIER and PPC_ACQUIRE_BARRIER with
PPC_ATOMIC_ENTRY_BARRIER and PPC_ATOMIC_EXIT_BARRIER, respectively, in
__cmpxchg_{u32,u64} to guarantee full-barrier semantics of
atomic{,64}_cmpxchg() and cmpxchg().
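
For reference, a rough sketch of how the SMP barrier macros differ
(paraphrased from arch/powerpc/include/asm/synch.h; the real
definitions go through stringify_in_c() and feature-fixup sections, and
the exact instruction used for the entry barrier depends on kernel
version and CPU):

	/* Paraphrased sketch, not the real definitions: */
	#define PPC_RELEASE_BARRIER	"lwsync\n"	/* store-release; no store->load ordering */
	#define PPC_ACQUIRE_BARRIER	"isync\n"	/* acquire after the ll/sc loop */
	#define PPC_ATOMIC_EXIT_BARRIER	"sync\n"	/* heavyweight full barrier */
	/* PPC_ATOMIC_ENTRY_BARRIER sits before the lwarx */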

Signed-off-by: Boqun Feng <boqun.feng@xxxxxxxxx>
---
arch/powerpc/include/asm/cmpxchg.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index 9f0379a..5c58743 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -151,14 +151,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
unsigned int prev;

__asm__ __volatile__ (
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
PPC405_ERR77(0,%2)
" stwcx. %4,0,%2\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
"\n\
2:"
: "=&r" (prev), "+m" (*p)
@@ -239,13 +239,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
unsigned long prev;

__asm__ __volatile__ (
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
cmpd 0,%0,%3\n\
bne- 2f\n\
stdcx. %4,0,%2\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
"\n\
2:"
: "=&r" (prev), "+m" (*p)
--
2.5.1
