[PATCH RFC rcu 09/19] srcu: Move ->srcu_gp_mutex from srcu_struct to srcu_usage

From: Paul E. McKenney
Date: Thu Mar 23 2023 - 20:20:23 EST


This commit moves the ->srcu_gp_mutex field from the srcu_struct structure
to the srcu_usage structure to reduce the size of the former in order
to improve cache locality.

Suggested-by: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Paul E. McKenney <paulmck@xxxxxxxxxx>
---
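For reviewers who have not been following this series, the sketch below is purely illustrative (hypothetical demo_* names, not the actual SRCU declarations) and only shows the pattern applied here: reader-facing hot fields stay in the main per-domain structure, while update-side management state such as this mutex lives behind a pointer in a separately allocated usage structure.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Rarely-touched update-side state, kept out of the reader-facing structure. */
struct demo_usage {
	struct mutex gp_mutex;		/* Serialize grace-period work. */
};

/* Reader-facing structure stays small; cold state sits behind a pointer. */
struct demo_struct {
	unsigned int idx;		/* Hot: read on every reader entry/exit. */
	struct demo_usage *sup;		/* Cold: grace-period management state. */
};

static int demo_init(struct demo_struct *dsp)
{
	dsp->sup = kzalloc(sizeof(*dsp->sup), GFP_KERNEL);
	if (!dsp->sup)
		return -ENOMEM;
	mutex_init(&dsp->sup->gp_mutex);
	return 0;
}

/* Grace-period work now reaches the mutex via the ->sup pointer. */
static void demo_gp_work(struct demo_struct *dsp)
{
	mutex_lock(&dsp->sup->gp_mutex);
	/* ... drive the grace-period state machine ... */
	mutex_unlock(&dsp->sup->gp_mutex);
}

As with the real ->srcu_gp_mutex, the mutex in the sketch is taken only by grace-period processing, so moving it behind the pointer adds no cost on the reader fast path while shrinking the structure that readers actually touch.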
 include/linux/srcutree.h |  2 +-
 kernel/rcu/srcutree.c    | 14 +++++++-------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 6fe5743c1179..6402cd5a93ef 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -67,13 +67,13 @@ struct srcu_usage {
 	int srcu_size_state;		/* Small-to-big transition state. */
 	struct mutex srcu_cb_mutex;	/* Serialize CB preparation. */
 	spinlock_t __private lock;	/* Protect counters and size state. */
+	struct mutex srcu_gp_mutex;	/* Serialize GP work. */
 };
 
 /*
  * Per-SRCU-domain structure, similar in function to rcu_state.
  */
 struct srcu_struct {
-	struct mutex srcu_gp_mutex;	/* Serialize GP work. */
 	unsigned int srcu_idx;		/* Current rdr array element. */
 	unsigned long srcu_gp_seq;	/* Grace-period seq #. */
 	unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 579c43ff0f4b..3bf3408c7716 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -245,7 +245,7 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
 	ssp->srcu_sup->srcu_size_state = SRCU_SIZE_SMALL;
 	ssp->srcu_sup->node = NULL;
 	mutex_init(&ssp->srcu_sup->srcu_cb_mutex);
-	mutex_init(&ssp->srcu_gp_mutex);
+	mutex_init(&ssp->srcu_sup->srcu_gp_mutex);
 	ssp->srcu_idx = 0;
 	ssp->srcu_gp_seq = 0;
 	ssp->srcu_barrier_seq = 0;
@@ -876,7 +876,7 @@ static void srcu_gp_end(struct srcu_struct *ssp)
 	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
 		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
 	spin_unlock_irq_rcu_node(ssp->srcu_sup);
-	mutex_unlock(&ssp->srcu_gp_mutex);
+	mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
 	/* A new grace period can start at this point. But only one. */
 
 	/* Initiate callback invocation as needed. */
@@ -1607,7 +1607,7 @@ static void srcu_advance_state(struct srcu_struct *ssp)
 {
 	int idx;
 
-	mutex_lock(&ssp->srcu_gp_mutex);
+	mutex_lock(&ssp->srcu_sup->srcu_gp_mutex);
 
 	/*
 	 * Because readers might be delayed for an extended period after
@@ -1625,7 +1625,7 @@ static void srcu_advance_state(struct srcu_struct *ssp)
 		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
 			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
 			spin_unlock_irq_rcu_node(ssp->srcu_sup);
-			mutex_unlock(&ssp->srcu_gp_mutex);
+			mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
 			return;
 		}
 		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
@@ -1633,7 +1633,7 @@ static void srcu_advance_state(struct srcu_struct *ssp)
 			srcu_gp_start(ssp);
 		spin_unlock_irq_rcu_node(ssp->srcu_sup);
 		if (idx != SRCU_STATE_IDLE) {
-			mutex_unlock(&ssp->srcu_gp_mutex);
+			mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
 			return; /* Someone else started the grace period. */
 		}
 	}
@@ -1641,7 +1641,7 @@ static void srcu_advance_state(struct srcu_struct *ssp)
 	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
 		idx = 1 ^ (ssp->srcu_idx & 1);
 		if (!try_check_zero(ssp, idx, 1)) {
-			mutex_unlock(&ssp->srcu_gp_mutex);
+			mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
 			return; /* readers present, retry later. */
 		}
 		srcu_flip(ssp);
@@ -1659,7 +1659,7 @@ static void srcu_advance_state(struct srcu_struct *ssp)
 		 */
 		idx = 1 ^ (ssp->srcu_idx & 1);
 		if (!try_check_zero(ssp, idx, 2)) {
-			mutex_unlock(&ssp->srcu_gp_mutex);
+			mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
 			return; /* readers present, retry later. */
 		}
 		ssp->srcu_n_exp_nodelay = 0;
--
2.40.0.rc2