[PATCH v3 1/3] sched_ext: Fix pnt_seq calculation
From: liuwenfang
Date: Sun Jul 20 2025 - 05:36:35 EST
Fix pnt_seq calculation for all transitions.

pnt_seq is currently advanced only in switch_class(), i.e. when a CPU
switches away from the ext class. A CPU that reschedules through a path
that never calls switch_class() (for example, picking the next fair task
directly) never bumps pnt_seq, so the busy-wait in kick_cpus_irq_workfn()
that pairs with it can keep spinning even though the target CPU has
already performed a resched.

Move the increment into a new scx_put_prev_set_next() helper and call it
from pick_next_task_fair() and put_prev_set_next_task(), so that pnt_seq
is advanced on all task transitions.
Signed-off-by: Wenfang Liu <liuwenfang@xxxxxxxxx>
---
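For reviewers, a minimal standalone sketch of the store-release/load-acquire
pairing this patch relies on: one side publishes a sequence bump with a
release store (as scx_put_prev_set_next() now does), the other busy-waits
with acquire loads (as kick_cpus_irq_workfn() does). This is userspace C11
for illustration only; the function names and pthread scaffolding are made
up and are not kernel API:

/* Not part of this patch: standalone illustration of the pnt_seq pairing. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong pnt_seq;    /* stands in for rq->scx.pnt_seq */

/* Target CPU's resched path: publish the bump with a release store. */
static void *resched_path(void *arg)
{
        atomic_store_explicit(&pnt_seq,
                              atomic_load_explicit(&pnt_seq, memory_order_relaxed) + 1,
                              memory_order_release);
        return NULL;
}

/* Kicking CPU: busy-wait until the target has performed a resched. */
static void wait_for_resched(unsigned long seen)
{
        while (atomic_load_explicit(&pnt_seq, memory_order_acquire) == seen)
                ;
}

int main(void)
{
        unsigned long seen = atomic_load_explicit(&pnt_seq, memory_order_relaxed);
        pthread_t t;

        pthread_create(&t, NULL, resched_path, NULL);
        wait_for_resched(seen);
        pthread_join(t, NULL);
        printf("observed pnt_seq advance after resched\n");
        return 0;
}

Build with "cc -pthread". The release/acquire pair guarantees that once the
waiter observes the new sequence value, everything the target CPU did before
the bump (here, the reschedule) is visible to it.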
kernel/sched/ext.c | 23 ++++++++++++++---------
kernel/sched/fair.c | 3 +++
kernel/sched/sched.h | 8 ++++++++
3 files changed, 25 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index f5133249f..93e03b7d0 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -3191,14 +3191,6 @@ static void switch_class(struct rq *rq, struct task_struct *next)
{
const struct sched_class *next_class = next->sched_class;
-#ifdef CONFIG_SMP
- /*
- * Pairs with the smp_load_acquire() issued by a CPU in
- * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
- * resched.
- */
- smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
-#endif
if (!static_branch_unlikely(&scx_ops_cpu_preempt))
return;
@@ -3233,6 +3225,19 @@ static void switch_class(struct rq *rq, struct task_struct *next)
}
}
+void scx_put_prev_set_next(struct rq *rq, struct task_struct *prev,
+ struct task_struct *next)
+{
+#ifdef CONFIG_SMP
+ /*
+ * Pairs with the smp_load_acquire() issued by a CPU in
+ * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
+ * resched.
+ */
+ smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
+#endif
+}
+
static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
struct task_struct *next)
{
@@ -5966,7 +5971,7 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work)
if (cpu != cpu_of(this_rq)) {
/*
* Pairs with smp_store_release() issued by this CPU in
- * switch_class() on the resched path.
+ * scx_put_prev_set_next() on the resched path.
*
* We busy-wait here to guarantee that no other task can
* be scheduled on our core before the target CPU has
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0fb9bf995..50d757e92 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8887,6 +8887,9 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
__put_prev_set_next_dl_server(rq, prev, p);
+ if (scx_enabled())
+ scx_put_prev_set_next(rq, prev, p);
+
/*
* Because of the set_next_buddy() in dequeue_task_fair() it is rather
* likely that a next task is from the same cgroup as the current.
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 47972f34e..bcb7f175c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1738,12 +1738,17 @@ static inline void scx_rq_clock_invalidate(struct rq *rq)
WRITE_ONCE(rq->scx.flags, rq->scx.flags & ~SCX_RQ_CLK_VALID);
}
+void scx_put_prev_set_next(struct rq *rq, struct task_struct *prev,
+ struct task_struct *next);
#else /* !CONFIG_SCHED_CLASS_EXT */
#define scx_enabled() false
#define scx_switched_all() false
static inline void scx_rq_clock_update(struct rq *rq, u64 clock) {}
static inline void scx_rq_clock_invalidate(struct rq *rq) {}
+static inline void scx_put_prev_set_next(struct rq *rq,
+ struct task_struct *prev,
+ struct task_struct *next) {}
#endif /* !CONFIG_SCHED_CLASS_EXT */
/*
@@ -2465,6 +2470,9 @@ static inline void put_prev_set_next_task(struct rq *rq,
__put_prev_set_next_dl_server(rq, prev, next);
+ if (scx_enabled())
+ scx_put_prev_set_next(rq, prev, next);
+
if (next == prev)
return;
--
2.17.1