[RFC][PATCH 05/17] x86: Optimize arch_spin_unlock_wait()

From: Peter Zijlstra
Date: Fri Dec 24 2010 - 07:43:10 EST


Only wait for the current holder to release the lock.

spin_unlock_wait() can only meaningfully wait for the current holder,
since completion of this function is inherently racy with respect to
new contenders. Therefore, there is no reason to wait until the lock
is completely unlocked.

Cc: Nick Piggin <npiggin@xxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Jeremy Fitzhardinge <jeremy@xxxxxxxx>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
---
arch/x86/include/asm/spinlock.h | 26 +++++++++++++++++++++++---
1 file changed, 23 insertions(+), 3 deletions(-)

Index: linux-2.6/arch/x86/include/asm/spinlock.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/spinlock.h
+++ linux-2.6/arch/x86/include/asm/spinlock.h
@@ -158,18 +158,32 @@ static __always_inline void __ticket_spi
}
#endif

+#define TICKET_MASK ((1 << TICKET_SHIFT) - 1)
+
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);

- return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
+ return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);

- return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
+ return (((tmp >> TICKET_SHIFT) - tmp) & TICKET_MASK) > 1;
+}
+
+static inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ int tmp = ACCESS_ONCE(lock->slock);
+
+ if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+ return; /* not locked */
+
+ /* wait until the current lock holder goes away */
+ while ((lock->slock & TICKET_MASK) == (tmp & TICKET_MASK))
+ cpu_relax();
}

#ifndef CONFIG_PARAVIRT_SPINLOCKS
@@ -206,7 +220,11 @@ static __always_inline void arch_spin_lo
arch_spin_lock(lock);
}

-#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ __ticket_spin_unlock_wait(lock);
+}
+#else

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
@@ -214,6 +232,8 @@ static inline void arch_spin_unlock_wait
cpu_relax();
}

+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/