[RFC PATCH v2 02/25] sched/deadline: Do not access dl_se->rq directly

From: Yuri Andriaccio
Date: Thu Jul 31 2025 - 06:56:05 EST


From: luca abeni <luca.abeni@xxxxxxxxxxxxxxx>

Make the deadline.c code access a scheduling entity's runqueue through the
sched_dl_entity data structure (via rq_of_dl_se()) rather than dereferencing
dl_se->rq directly. This allows future patches to store runqueues other than
the global per-CPU runqueues in sched_dl_entity.

Signed-off-by: luca abeni <luca.abeni@xxxxxxxxxxxxxxx>
Signed-off-by: Yuri Andriaccio <yurand2000@xxxxxxxxx>
---
kernel/sched/deadline.c | 27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 8ba6bf3ef68..46b9b78cca2 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -892,7 +892,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
* and arm the defer timer.
*/
if (dl_se->dl_defer && !dl_se->dl_defer_running &&
- dl_time_before(rq_clock(dl_se->rq), dl_se->deadline - dl_se->runtime)) {
+ dl_time_before(rq_clock(rq), dl_se->deadline - dl_se->runtime)) {
if (!is_dl_boosted(dl_se) && dl_se->server_has_tasks(dl_se)) {

/*
@@ -1202,11 +1202,11 @@ static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_
* of time. The dl_server_min_res serves as a limit to avoid
* forwarding the timer for a too small amount of time.
*/
- if (dl_time_before(rq_clock(dl_se->rq),
+ if (dl_time_before(rq_clock(rq),
(dl_se->deadline - dl_se->runtime - dl_server_min_res))) {

/* reset the defer timer */
- fw = dl_se->deadline - rq_clock(dl_se->rq) - dl_se->runtime;
+ fw = dl_se->deadline - rq_clock(rq) - dl_se->runtime;

hrtimer_forward_now(timer, ns_to_ktime(fw));
return HRTIMER_RESTART;
@@ -1217,7 +1217,7 @@ static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_

enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);

- if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &dl_se->rq->curr->dl))
+ if (!dl_task(rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
resched_curr(rq);

__push_dl_task(rq, rf);
@@ -1485,7 +1485,7 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64

hrtimer_try_to_cancel(&dl_se->dl_timer);

- replenish_dl_new_period(dl_se, dl_se->rq);
+ replenish_dl_new_period(dl_se, rq);

/*
* Not being able to start the timer seems problematic. If it could not
@@ -1597,21 +1597,22 @@ void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
/* 0 runtime = fair server disabled */
if (dl_se->dl_runtime) {
dl_se->dl_server_idle = 0;
- update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
+ update_curr_dl_se(rq_of_dl_se(dl_se), dl_se, delta_exec);
}
}

void dl_server_start(struct sched_dl_entity *dl_se)
{
- struct rq *rq = dl_se->rq;
+ struct rq *rq;

if (!dl_server(dl_se) || dl_se->dl_server_active)
return;

dl_se->dl_server_active = 1;
enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
- if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
- resched_curr(dl_se->rq);
+ rq = rq_of_dl_se(dl_se);
+ if (!dl_task(rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
+ resched_curr(rq);
}

void dl_server_stop(struct sched_dl_entity *dl_se)
@@ -1667,9 +1668,9 @@ void sched_init_dl_servers(void)

WARN_ON(dl_server(dl_se));

- dl_server_apply_params(dl_se, runtime, period, 1);
-
dl_se->dl_server = 1;
+ BUG_ON(dl_server_apply_params(dl_se, runtime, period, 1));
+
dl_se->dl_defer = 1;
setup_new_dl_entity(dl_se);
}
@@ -1678,7 +1679,7 @@ void sched_init_dl_servers(void)
int dl_server_apply_params(struct sched_dl_entity *dl_se, u64 runtime, u64 period, bool init)
{
u64 max_bw, new_bw = to_ratio(period, runtime);
- struct rq *rq = dl_se->rq;
+ struct rq *rq = rq_of_dl_se(dl_se);
int cpu = cpu_of(rq);
struct dl_bw *dl_b;
unsigned long cap;
@@ -1752,7 +1753,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
p = dl_task_of(dl_se);
rq = task_rq_lock(p, &rf);
} else {
- rq = dl_se->rq;
+ rq = rq_of_dl_se(dl_se);
rq_lock(rq, &rf);
}

--
2.50.1