[PATCH v2 2/3] rcutorture: Add support to detect if boost kthread prio is too low

From: Joel Fernandes
Date: Tue Jun 19 2018 - 18:14:56 EST


From: "Joel Fernandes (Google)" <joel@xxxxxxxxxxxxxxxxx>

When rcutorture is built into the kernel, we can detect that and raise the
priority of RCU's kthreads high enough that rcutorture's boost kthreads
will not be able to starve them. Earlier patches in this series do exactly
that.

However, if rcutorture is built as a module, the RCU kthreads' priorities
are left unchanged: we don't want to raise them on kernels without
rcutorture built in, since users may already depend on the existing
priorities. For this reason, boosting can't be tested when rcutorture is
built as a module and the kthreads run at their default priority. This
patch detects that situation and prints a message from the rcutorture
module so that the user knows how to remedy it (that is, manually pass a
higher rcutree.kthread_prio value on the kernel command line so that
boosting can be tested).
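
For example, assuming a kernel where rcutorture is built as a module, a
sketch of how boosting could still be tested (the exact priority value is
the user's choice, as long as it is greater than 1):

	# On the kernel command line, raise RCU's kthread priority:
	#     rcutree.kthread_prio=2
	# After boot, load rcutorture with boost testing forced on:
	modprobe rcutorture test_boost=2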

Signed-off-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx>
---
kernel/rcu/rcu.h | 2 ++
kernel/rcu/rcutorture.c | 30 ++++++++++++++++++++++++++----
kernel/rcu/tree.c | 7 +++++++
3 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index b53745371bfb..0af6ce6d8b66 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -547,6 +547,7 @@ static inline void rcu_force_quiescent_state(void) { }
static inline void rcu_bh_force_quiescent_state(void) { }
static inline void rcu_sched_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
+static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_bh_get_gp_seq(void);
@@ -555,6 +556,7 @@ unsigned long rcu_exp_batches_completed(void);
unsigned long rcu_exp_batches_completed_sched(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
+int rcu_get_gp_kthreads_prio(void);
void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index f2cde4dd432d..2d1af5ffe536 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1786,6 +1786,30 @@ static void rcu_torture_barrier_cleanup(void)
	}
}

+static bool rcu_torture_can_boost(void)
+{
+	static int boost_warn_once;
+	int prio;
+
+	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
+		return false;
+
+	prio = rcu_get_gp_kthreads_prio();
+	if (!prio)
+		return false;
+
+	if (prio < 2) {
+		if (boost_warn_once == 1)
+			return false;
+
+		pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
+		boost_warn_once = 1;
+		return false;
+	}
+
+	return true;
+}
+
static enum cpuhp_state rcutor_hp;

static void
@@ -1830,8 +1854,7 @@ rcu_torture_cleanup(void)
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	for (i = 0; i < ncbflooders; i++)
		torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
-	if ((test_boost == 1 && cur_ops->can_boost) ||
-	    test_boost == 2)
+	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

/*
@@ -2055,8 +2078,7 @@ rcu_torture_init(void)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
-	if ((test_boost == 1 && cur_ops->can_boost) ||
-	    test_boost == 2) {
+	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 92c29439eba6..7832dd556490 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -183,6 +183,13 @@ module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

+/* Retrieve RCU kthreads priority for rcutorture */
+int rcu_get_gp_kthreads_prio(void)
+{
+	return kthread_prio;
+}
+EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
+
/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay. The longer the delay, the more the grace periods between
--
2.18.0.rc1.244.gcf134e6275-goog