[PATCH 1/3] powerpc/spinlock: Define smp_mb__after_spinlock only once

From: Davidlohr Bueso
Date: Mon Mar 08 2021 - 21:00:55 EST


Instead of having both the queued and simple spinlock headers define
smp_mb__after_spinlock(), define it once in the arch's spinlock.h.

Signed-off-by: Davidlohr Bueso <dbueso@xxxxxxx>
---
arch/powerpc/include/asm/qspinlock.h | 2 --
arch/powerpc/include/asm/simple_spinlock.h | 3 ---
arch/powerpc/include/asm/spinlock.h | 3 +++
3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index b752d34517b3..3ce1a0bee4fe 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -44,8 +44,6 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
}
#define queued_spin_lock queued_spin_lock

-#define smp_mb__after_spinlock() smp_mb()
-
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
/*
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 9c3c30534333..3e87258f73b1 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -282,7 +282,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
#define arch_read_relax(lock) rw_yield(lock)
#define arch_write_relax(lock) rw_yield(lock)

-/* See include/linux/spinlock.h */
-#define smp_mb__after_spinlock() smp_mb()
-
#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 6ec72282888d..bd75872a6334 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -10,6 +10,9 @@
#include <asm/simple_spinlock.h>
#endif

+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
+
#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline void pv_spinlocks_init(void) { }
#endif
--
2.26.2