[RFC PATCH 1/3] sched: add bool return value to sched_class::yield_task()

From: Kuba Piecuch
Date: Fri Aug 08 2025 - 16:03:50 EST


The return value controls whether do_sched_yield() goes on to call
schedule(). A sched_class can use this, e.g., as an optimization:
when it is certain that yielding cannot result in another task being
scheduled, it can return false and skip the reschedule entirely.

This patch does not change the current behavior: every yield_task()
implementation returns true, so every call to do_sched_yield() still
goes through to schedule().
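
For illustration only (not part of this series), a hypothetical
variant of yield_task_fair() could use the return value to skip the
reschedule when the yielding task is alone on its runqueue:

	static bool yield_task_fair(struct rq *rq)
	{
		/*
		 * Hypothetical sketch: we are the only runnable task,
		 * so yielding cannot make another task run. Tell
		 * do_sched_yield() to skip schedule() entirely.
		 */
		if (unlikely(rq->nr_running == 1))
			return false;

		/* ... rest of the existing yield path unchanged ... */
		return true;
	}

With such an implementation, do_sched_yield() would drop the rq lock
and return immediately, without bumping yld_count or calling
schedule().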

Signed-off-by: Kuba Piecuch <jpiecuch@xxxxxxxxxx>
---
 kernel/sched/deadline.c  | 4 +++-
 kernel/sched/ext.c       | 4 +++-
 kernel/sched/fair.c      | 6 ++++--
 kernel/sched/rt.c        | 3 ++-
 kernel/sched/sched.h     | 2 +-
 kernel/sched/stop_task.c | 2 +-
 kernel/sched/syscalls.c  | 9 ++++++++-
 7 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 89019a1408264..d8dcb73bd3433 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2194,7 +2194,7 @@ static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
  * yield_task_dl will indicate that some spare budget
  * is available for other task instances to use it.
  */
-static void yield_task_dl(struct rq *rq)
+static bool yield_task_dl(struct rq *rq)
 {
 	/*
 	 * We make the task go to sleep until its current deadline by
@@ -2212,6 +2212,8 @@ static void yield_task_dl(struct rq *rq)
 	 * and double the fastpath cost.
 	 */
 	rq_clock_skip_update(rq);
+
+	return true;
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 7dd5cbcb7a069..dd0a0b6b7aa05 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -2502,7 +2502,7 @@ static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
 	return true;
 }
 
-static void yield_task_scx(struct rq *rq)
+static bool yield_task_scx(struct rq *rq)
 {
 	struct scx_sched *sch = scx_root;
 	struct task_struct *p = rq->curr;
@@ -2511,6 +2511,8 @@ static void yield_task_scx(struct rq *rq)
 		SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL);
 	else
 		p->scx.slice = 0;
+
+	return true;
 }
 
 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7a14da5396fb2..c06a2f8290822 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9010,7 +9010,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct t
 /*
  * sched_yield() is very simple
  */
-static void yield_task_fair(struct rq *rq)
+static bool yield_task_fair(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -9020,7 +9020,7 @@ static void yield_task_fair(struct rq *rq)
 	 * Are we the only task in the tree?
 	 */
 	if (unlikely(rq->nr_running == 1))
-		return;
+		return true;
 
 	clear_buddies(cfs_rq, se);
 
@@ -9037,6 +9037,8 @@ static void yield_task_fair(struct rq *rq)
 	rq_clock_skip_update(rq);
 
 	se->deadline += calc_delta_fair(se->slice, se);
+
+	return true;
 }
 
 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e40422c370335..1fa535457cc40 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1533,9 +1533,10 @@ static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 	}
 }
 
-static void yield_task_rt(struct rq *rq)
+static bool yield_task_rt(struct rq *rq)
 {
 	requeue_task_rt(rq, rq->curr, 0);
+	return true;
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 83e3aa9171429..8b2cd54a09942 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2395,7 +2395,7 @@ struct sched_class {
 
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
 	bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
-	void (*yield_task)   (struct rq *rq);
+	bool (*yield_task)   (struct rq *rq);
 	bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
 
 	void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 058dd42e3d9b5..c6da16a39e08d 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -54,7 +54,7 @@ dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 	return true;
 }
 
-static void yield_task_stop(struct rq *rq)
+static bool yield_task_stop(struct rq *rq)
 {
 	BUG(); /* the stop task should never yield, its pointless. */
 }
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 547c1f05b667e..e708a31c8e313 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -1354,11 +1354,18 @@ static void do_sched_yield(void)
 {
 	struct rq_flags rf;
 	struct rq *rq;
+	bool should_resched;
 
 	rq = this_rq_lock_irq(&rf);
 
+	should_resched = current->sched_class->yield_task(rq);
+
+	if (unlikely(!should_resched)) {
+		rq_unlock_irq(rq, &rf);
+		return;
+	}
+
 	schedstat_inc(rq->yld_count);
-	current->sched_class->yield_task(rq);
 
 	preempt_disable();
 	rq_unlock_irq(rq, &rf);
--
2.51.0.rc0.155.g4a0f42376b-goog