[PATCH 6/6] locking/rwsem: Use the force

From: Peter Zijlstra
Date: Thu Feb 23 2023 - 07:35:00 EST


Now that the writer adjustment is done from the wakeup side and
HANDOFF guarantees spinning/stealing is disabled, use the combined
guarantee to ignore spurious READER_BIAS and directly claim the lock.
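
For illustration, the READER_BIAS in question comes from the reader
fast path, which optimistically adds its bias before checking the
count; a minimal sketch of that interaction (simplified from
rwsem_read_trylock() / rwsem_down_read_slowpath(), not part of this
patch):

	/* Reader fast path: optimistically add the bias ... */
	count = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);

	/*
	 * ... then check for writers / handoff. Once the waker below has
	 * installed RWSEM_WRITER_LOCKED, this check fails and the slowpath
	 * takes the bias back out before sleeping, so any READER_BIAS seen
	 * while HANDOFF is set is guaranteed to be transient.
	 */
	if (count & RWSEM_READ_FAILED_MASK)
		/* slow path; removes the bias again before sleeping */
		rwsem_down_read_slowpath(sem, count, state);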

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 kernel/locking/lock_events_list.h |  1 +
 kernel/locking/rwsem.c            | 21 +++++++++++++++++++++
 2 files changed, 22 insertions(+)

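For reference, the count bits involved (as defined in
kernel/locking/rwsem.c) make the early-handoff claim below a single
atomic operation:

	#define RWSEM_WRITER_LOCKED	(1UL << 0)
	#define RWSEM_FLAG_WAITERS	(1UL << 1)
	#define RWSEM_FLAG_HANDOFF	(1UL << 2)

With HANDOFF known to be set and WRITER_LOCKED known to be clear,
adding (RWSEM_WRITER_LOCKED - RWSEM_FLAG_HANDOFF) sets bit 0 and
clears bit 2 in a single atomic_long_add(), no cmpxchg loop required.
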
--- a/kernel/locking/lock_events_list.h
+++ b/kernel/locking/lock_events_list.h
@@ -67,3 +67,4 @@ LOCK_EVENT(rwsem_rlock_handoff) /* # of
 LOCK_EVENT(rwsem_wlock)		/* # of write locks acquired */
 LOCK_EVENT(rwsem_wlock_fail)	/* # of failed write lock acquisitions */
 LOCK_EVENT(rwsem_wlock_handoff)	/* # of write lock handoffs */
+LOCK_EVENT(rwsem_wlock_ehandoff)	/* # of write lock early handoffs */
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -433,6 +433,26 @@ static void rwsem_writer_wake(struct rw_
 	lockdep_assert_held(&sem->wait_lock);
 
 	count = atomic_long_read(&sem->count);
+
+	/*
+	 * Since rwsem_mark_wake() is only called (with WAKE_ANY) when
+	 * the lock is unlocked, and the HANDOFF bit guarantees that
+	 * all spinning / stealing is disabled, it is possible to
+	 * unconditionally claim the lock -- any READER_BIAS will be
+	 * temporary.
+	 */
+	if (count & RWSEM_FLAG_HANDOFF) {
+		unsigned long adjustment = RWSEM_WRITER_LOCKED - RWSEM_FLAG_HANDOFF;
+
+		if (list_is_singular(&sem->wait_list))
+			adjustment -= RWSEM_FLAG_WAITERS;
+
+		atomic_long_set(&sem->owner, (long)waiter->task);
+		atomic_long_add(adjustment, &sem->count);
+		lockevent_inc(rwsem_wlock_ehandoff);
+		goto success;
+	}
+
 	do {
 		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
 
@@ -479,6 +499,7 @@ static void rwsem_writer_wake(struct rw_
 		return;
 	}
 
+success:
 	/*
 	 * Have rwsem_writer_wake() fully imply rwsem_del_waiter() on
 	 * success.