[PATCH 10/30] sched: Add @reason to sched_class->rq_{on|off}line()

From: Tejun Heo
Date: Fri Jan 27 2023 - 19:18:02 EST


The ->rq_{on|off}line() callbacks are invoked either during CPU hotplug or
cpuset partition updates. A planned BPF extensible sched_class wants to
notify the BPF scheduler progs of CPU hotplug events in a way that's
synchronized with rq state changes.

As the BPF scheduler progs aren't necessarily affected by cpuset partition
updates, we need a way to distinguish the two types of events. Let's add a
@reason argument to the callbacks to tell them apart.

v2: Patch description updated to detail the expected use.

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
Reviewed-by: David Vernet <dvernet@xxxxxxxx>
Acked-by: Josh Don <joshdon@xxxxxxxxxx>
Acked-by: Hao Luo <haoluo@xxxxxxxxxx>
Acked-by: Barret Rhoden <brho@xxxxxxxxxx>
---
 kernel/sched/core.c     | 12 ++++++------
 kernel/sched/deadline.c |  4 ++--
 kernel/sched/fair.c     |  4 ++--
 kernel/sched/rt.c       |  4 ++--
 kernel/sched/sched.h    | 13 +++++++++----
 kernel/sched/topology.c |  4 ++--
 6 files changed, 23 insertions(+), 18 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 729de63ee6ec..6447f10ecd44 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9355,7 +9355,7 @@ static inline void balance_hotplug_wait(void)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-void set_rq_online(struct rq *rq)
+void set_rq_online(struct rq *rq, enum rq_onoff_reason reason)
 {
 	if (!rq->online) {
 		const struct sched_class *class;
@@ -9365,19 +9365,19 @@ void set_rq_online(struct rq *rq)
 
 		for_each_class(class) {
 			if (class->rq_online)
-				class->rq_online(rq);
+				class->rq_online(rq, reason);
 		}
 	}
 }
 
-void set_rq_offline(struct rq *rq)
+void set_rq_offline(struct rq *rq, enum rq_onoff_reason reason)
 {
 	if (rq->online) {
 		const struct sched_class *class;
 
 		for_each_class(class) {
 			if (class->rq_offline)
-				class->rq_offline(rq);
+				class->rq_offline(rq, reason);
 		}
 
 		cpumask_clear_cpu(rq->cpu, rq->rd->online);
@@ -9473,7 +9473,7 @@ int sched_cpu_activate(unsigned int cpu)
 	rq_lock_irqsave(rq, &rf);
 	if (rq->rd) {
 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_online(rq);
+		set_rq_online(rq, RQ_ONOFF_HOTPLUG);
 	}
 	rq_unlock_irqrestore(rq, &rf);
 
@@ -9518,7 +9518,7 @@ int sched_cpu_deactivate(unsigned int cpu)
 	if (rq->rd) {
 		update_rq_clock(rq);
 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_offline(rq);
+		set_rq_offline(rq, RQ_ONOFF_HOTPLUG);
 	}
 	rq_unlock_irqrestore(rq, &rf);
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0d97d54276cc..f264814e0513 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2518,7 +2518,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 }
 
 /* Assumes rq->lock is held */
-static void rq_online_dl(struct rq *rq)
+static void rq_online_dl(struct rq *rq, enum rq_onoff_reason reason)
 {
 	if (rq->dl.overloaded)
 		dl_set_overload(rq);
@@ -2529,7 +2529,7 @@ static void rq_online_dl(struct rq *rq)
 }
 
 /* Assumes rq->lock is held */
-static void rq_offline_dl(struct rq *rq)
+static void rq_offline_dl(struct rq *rq, enum rq_onoff_reason reason)
 {
 	if (rq->dl.overloaded)
 		dl_clear_overload(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fbedf99ed953..b4db8943ed8b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11673,14 +11673,14 @@ void trigger_load_balance(struct rq *rq)
 	nohz_balancer_kick(rq);
 }
 
-static void rq_online_fair(struct rq *rq)
+static void rq_online_fair(struct rq *rq, enum rq_onoff_reason reason)
 {
 	update_sysctl();
 
 	update_runtime_enabled(rq);
 }
 
-static void rq_offline_fair(struct rq *rq)
+static void rq_offline_fair(struct rq *rq, enum rq_onoff_reason reason)
 {
 	update_sysctl();
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ed2a47e4ddae..0fb7ee087669 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2470,7 +2470,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 }
 
 /* Assumes rq->lock is held */
-static void rq_online_rt(struct rq *rq)
+static void rq_online_rt(struct rq *rq, enum rq_onoff_reason reason)
 {
 	if (rq->rt.overloaded)
 		rt_set_overload(rq);
@@ -2481,7 +2481,7 @@ static void rq_online_rt(struct rq *rq)
 }
 
 /* Assumes rq->lock is held */
-static void rq_offline_rt(struct rq *rq)
+static void rq_offline_rt(struct rq *rq, enum rq_onoff_reason reason)
 {
 	if (rq->rt.overloaded)
 		rt_clear_overload(rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index dd567eb7881a..cc1163b15aa0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2180,6 +2180,11 @@ extern const u32 sched_prio_to_wmult[40];
 
 #define RETRY_TASK		((void *)-1UL)
 
+enum rq_onoff_reason {
+	RQ_ONOFF_HOTPLUG,	/* CPU is going on/offline */
+	RQ_ONOFF_TOPOLOGY,	/* sched domain topology update */
+};
+
 struct affinity_context {
 	const struct cpumask *new_mask;
 	struct cpumask *user_mask;
@@ -2216,8 +2221,8 @@ struct sched_class {
 
 	void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);
 
-	void (*rq_online)(struct rq *rq);
-	void (*rq_offline)(struct rq *rq);
+	void (*rq_online)(struct rq *rq, enum rq_onoff_reason reason);
+	void (*rq_offline)(struct rq *rq, enum rq_onoff_reason reason);
 
 	struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
 #endif
@@ -2782,8 +2787,8 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	raw_spin_rq_unlock(rq1);
 }
 
-extern void set_rq_online (struct rq *rq);
-extern void set_rq_offline(struct rq *rq);
+extern void set_rq_online (struct rq *rq, enum rq_onoff_reason reason);
+extern void set_rq_offline(struct rq *rq, enum rq_onoff_reason reason);
 extern bool sched_smp_initialized;
 
 #else /* CONFIG_SMP */
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8739c2a5a54e..0e859bea1cb6 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -493,7 +493,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
 		old_rd = rq->rd;
 
 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
-			set_rq_offline(rq);
+			set_rq_offline(rq, RQ_ONOFF_TOPOLOGY);
 
 		cpumask_clear_cpu(rq->cpu, old_rd->span);
 
@@ -511,7 +511,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
 
 	cpumask_set_cpu(rq->cpu, rd->span);
 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
-		set_rq_online(rq);
+		set_rq_online(rq, RQ_ONOFF_TOPOLOGY);
 
 	raw_spin_rq_unlock_irqrestore(rq, flags);
 
--
2.39.1