[PATCH for review] [38/145] x86_64: Switch rwlocks over to patchable lock prefix

From: Andi Kleen
Date: Thu Aug 10 2006 - 16:17:31 EST



This way their lock prefix can be patched away on UP kernels.
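
For background, LOCK_PREFIX is not simply the string "lock ; ": on SMP builds it records the address of each emitted lock prefix in a dedicated section so the boot-time alternatives code can overwrite the prefixes with NOPs when an SMP kernel finds itself running on a single CPU. A minimal sketch of the mechanism, modelled on the 2.6.18-era include/asm-x86_64/alternative.h (section name, alignment and local label should be read as illustrative):

#ifdef CONFIG_SMP
/* Record the address of the lock prefix in .smp_locks so the
 * alternatives code can NOP it out at boot on a single CPU. */
#define LOCK_PREFIX \
		".section .smp_locks,\"a\"\n"	\
		"  .align 8\n"			\
		"  .quad 661f\n" /* address of the prefix below */ \
		".previous\n"			\
		"661:\n\tlock; "
#else
#define LOCK_PREFIX ""	/* UP build: no lock prefix at all */
#endif

With this in place, every LOCK_PREFIX user gets a real lock prefix on SMP, an empty string on a UP build, and a patched-in NOP on an SMP kernel booted on one CPU, which is what the two hunks below switch the rwlock unlock paths over to.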

Signed-off-by: Andi Kleen <ak@xxxxxxx>

---
include/asm-x86_64/spinlock.h | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)

Index: linux/include/asm-x86_64/spinlock.h
===================================================================
--- linux.orig/include/asm-x86_64/spinlock.h
+++ linux/include/asm-x86_64/spinlock.h
@@ -125,13 +125,13 @@ static inline int __raw_write_trylock(ra

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
- asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+ asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
- asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
- : "=m" (rw->lock) : : "memory");
+ asm volatile(LOCK_PREFIX "addl %1,%0"
+ : "=m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#endif /* __ASM_SPINLOCK_H */
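
Worth noting about the second hunk: besides switching to LOCK_PREFIX, it passes the bias as an "i" (immediate) operand instead of string-pasting RW_LOCK_BIAS_STR into the asm template, so the compiler substitutes the constant itself. A hypothetical standalone demo of the idiom (the function name is made up for illustration; "+m" is used here to mark the operand read-write):

#define RW_LOCK_BIAS 0x01000000

/* The "i" constraint folds the compile-time constant straight into
 * the instruction, emitting e.g. "lock ; addl $0x01000000,(%rdi)",
 * so no separate string variant of the macro is needed. */
static inline void demo_write_unlock(volatile unsigned int *lock)
{
	asm volatile("lock ; addl %1,%0"
		     : "+m" (*lock) : "i" (RW_LOCK_BIAS) : "memory");
}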