[PATCH v2 03/12] locking/mutex: Add task_struct::blocked_lock to serialize changes to the blocked_on state

From: John Stultz
Date: Mon Mar 20 2023 - 19:37:51 EST


From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

This patch was split out from the later "sched: Add proxy
execution" patch.

Add blocked_lock to the task_struct so that we can safely serialize
changes to a task's blocked_on state.

This will be used for tracking blocked-task/mutex chains by the
proxy-execution patches, in a similar fashion to how priority
inheritance is done with rt_mutexes.

Cc: Joel Fernandes <joelaf@xxxxxxxxxx>
Cc: Qais Yousef <qyousef@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Juri Lelli <juri.lelli@xxxxxxxxxx>
Cc: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Cc: Dietmar Eggemann <dietmar.eggemann@xxxxxxx>
Cc: Valentin Schneider <vschneid@xxxxxxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Ben Segall <bsegall@xxxxxxxxxx>
Cc: Zimuzo Ezeozue <zezeozue@xxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Daniel Bristot de Oliveira <bristot@xxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Waiman Long <longman@xxxxxxxxxx>
Cc: Boqun Feng <boqun.feng@xxxxxxxxx>
Cc: "Paul E . McKenney" <paulmck@xxxxxxxxxx>
Cc: kernel-team@xxxxxxxxxxx
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
[rebased, added comments and changelog]
Signed-off-by: Juri Lelli <juri.lelli@xxxxxxxxxx>
[Fixed rebase conflicts]
[squashed sched: Ensure blocked_on is always guarded by blocked_lock]
Signed-off-by: Valentin Schneider <valentin.schneider@xxxxxxx>
[fix rebase conflicts, various fixes & tweaks commented inline]
[squashed sched: Use rq->curr vs rq->proxy checks]
Signed-off-by: Connor O'Brien <connoro@xxxxxxxxxx>
[jstultz: Split out from bigger patch]
Signed-off-by: John Stultz <jstultz@xxxxxxxxxx>
---
v2:
* Split out into its own patch

TODO: Still need to clarify some of the locking changes here
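
For reviewers, the ordering the mutex.c changes below settle on is: take
the mutex's ->wait_lock first, then nest the waiter's ->blocked_lock
inside it, so that updates to a task's blocked_on state are always made
under its blocked_lock. A rough sketch of that pattern (illustration
only, not part of the patch; the helper name is made up):

static void blocked_on_locking_sketch(struct mutex *lock)
{
	/* Outer lock: serializes the mutex's wait list */
	raw_spin_lock(&lock->wait_lock);
	/* Inner lock: guards this task's blocked_on state */
	raw_spin_lock(&current->blocked_lock);

	/* e.g. where the series records what we are blocked on */
	current->blocked_on = lock;

	raw_spin_unlock(&current->blocked_lock);
	raw_spin_unlock(&lock->wait_lock);
}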
---
 include/linux/sched.h  |  1 +
 init/init_task.c       |  1 +
 kernel/fork.c          |  1 +
 kernel/locking/mutex.c | 27 +++++++++++++++++++++++----
 4 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9924d1926bc3..031615b5dc2a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1141,6 +1141,7 @@ struct task_struct {
 
 	struct task_struct	*blocked_proxy;	/* task that is boosting us */
 	struct mutex		*blocked_on;	/* lock we're blocked on */
+	raw_spinlock_t		blocked_lock;
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	int			non_block_count;
diff --git a/init/init_task.c b/init/init_task.c
index ff6c4b9bfe6b..189ce67e9704 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -130,6 +130,7 @@ struct task_struct init_task
 	.journal_info	= NULL,
 	INIT_CPU_TIMERS(init_task)
 	.pi_lock	= __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock),
+	.blocked_lock	= __RAW_SPIN_LOCK_UNLOCKED(init_task.blocked_lock),
 	.timer_slack_ns = 50000, /* 50 usec default slack */
 	.thread_pid	= &init_struct_pid,
 	.thread_group	= LIST_HEAD_INIT(init_task.thread_group),
diff --git a/kernel/fork.c b/kernel/fork.c
index ad27fa09fe70..95410333332f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2121,6 +2121,7 @@ static __latent_entropy struct task_struct *copy_process(
 	ftrace_graph_init_task(p);
 
 	rt_mutex_init_task(p);
+	raw_spin_lock_init(&p->blocked_lock);
 
 	lockdep_assert_irqs_enabled();
 #ifdef CONFIG_PROVE_LOCKING
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index f5296aa82255..2f31ebb08b4a 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -615,6 +615,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 	}
 
 	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock(&current->blocked_lock);
 	/*
 	 * After waiting to acquire the wait_lock, try again.
 	 */
@@ -676,6 +677,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 				goto err;
 		}
 
+		raw_spin_unlock(&current->blocked_lock);
 		raw_spin_unlock(&lock->wait_lock);
 		if (ww_ctx)
 			ww_ctx_wake(ww_ctx);
@@ -683,6 +685,8 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 
 		first = __mutex_waiter_is_first(lock, &waiter);
 
+		raw_spin_lock(&lock->wait_lock);
+		raw_spin_lock(&current->blocked_lock);
 		/*
 		 * Gets reset by ttwu_runnable().
 		 */
@@ -697,15 +701,28 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 			break;
 
 		if (first) {
+			bool acquired;
+
+			/*
+			 * XXX connoro: mutex_optimistic_spin() can schedule, so
+			 * we need to release these locks before calling it.
+			 * This needs refactoring though b/c currently we take
+			 * the locks earlier than necessary when proxy exec is
+			 * disabled and release them unnecessarily when it's
+			 * enabled. At a minimum, need to verify that releasing
+			 * blocked_lock here doesn't create any races.
+			 */
+			raw_spin_unlock(&current->blocked_lock);
+			raw_spin_unlock(&lock->wait_lock);
 			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
-			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
+			acquired = mutex_optimistic_spin(lock, ww_ctx, &waiter);
+			raw_spin_lock(&lock->wait_lock);
+			raw_spin_lock(&current->blocked_lock);
+			if (acquired)
 				break;
 			trace_contention_begin(lock, LCB_F_MUTEX);
 		}
-
-		raw_spin_lock(&lock->wait_lock);
 	}
-	raw_spin_lock(&lock->wait_lock);
 acquired:
 	__set_current_state(TASK_RUNNING);
 
@@ -732,6 +749,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 	if (ww_ctx)
 		ww_mutex_lock_acquired(ww, ww_ctx);
 
+	raw_spin_unlock(&current->blocked_lock);
 	raw_spin_unlock(&lock->wait_lock);
 	if (ww_ctx)
 		ww_ctx_wake(ww_ctx);
@@ -744,6 +762,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 	__mutex_remove_waiter(lock, &waiter);
 err_early_kill:
 	trace_contention_end(lock, ret);
+	raw_spin_unlock(&current->blocked_lock);
 	raw_spin_unlock(&lock->wait_lock);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, ip);
--
2.40.0.rc1.284.g88254d51c5-goog