From 0dc6cbb0bd06fe2c29999b7c6b3c2206b612d1fa Mon Sep 17 00:00:00 2001
From: Jemmy Wong
Date: Sat, 14 Jun 2025 15:30:26 +0800
Subject: [PATCH v0 13/13] sched/ext_idle: Scope-based Resource Management Support

Replace the manual preempt_disable()/preempt_enable() and
rcu_read_lock()/rcu_read_unlock() pairs in scx_select_cpu_dfl() with
scope-based guard(preempt)() and guard(rcu)(). The guards release the
critical sections automatically on every exit path, so the
out_unlock/out_enable labels and their goto statements can be replaced
with direct returns, improving robustness and reducing the risk of
lock mismanagement.

Signed-off-by: Jemmy Wong

---
 kernel/sched/ext_idle.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)

diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 6d29d3cbc670..0d280ab06b6d 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -458,7 +458,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	bool is_prev_allowed;
 	s32 cpu;
 
-	preempt_disable();
+	guard(preempt)();
 
 	/*
 	 * Check whether @prev_cpu is still within the allowed set. If not,
@@ -485,7 +485,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	/*
 	 * This is necessary to protect llc_cpus.
 	 */
-	rcu_read_lock();
+	guard(rcu)();
 
 	/*
 	 * Determine the subset of CPUs that the task can use in its
@@ -528,7 +528,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		if (is_prev_allowed && cpus_share_cache(cpu, prev_cpu) &&
 		    scx_idle_test_and_clear_cpu(prev_cpu)) {
 			cpu = prev_cpu;
-			goto out_unlock;
+			return cpu;
 		}
 
 		/*
@@ -550,7 +550,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		    (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
 		    !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
 			if (cpumask_test_cpu(cpu, allowed))
-				goto out_unlock;
+				return cpu;
 		}
 	}
 
@@ -566,7 +566,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		    cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
 		    scx_idle_test_and_clear_cpu(prev_cpu)) {
 			cpu = prev_cpu;
-			goto out_unlock;
+			return cpu;
 		}
 
 		/*
@@ -575,7 +575,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		if (llc_cpus) {
 			cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
 			if (cpu >= 0)
-				goto out_unlock;
+				return cpu;
 		}
 
 		/*
@@ -584,7 +584,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		if (numa_cpus) {
 			cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
 			if (cpu >= 0)
-				goto out_unlock;
+				return cpu;
 		}
 
 		/*
@@ -597,7 +597,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		 */
 		cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
 		if (cpu >= 0)
-			goto out_unlock;
+			return cpu;
 
 		/*
 		 * Give up if we're strictly looking for a full-idle SMT
@@ -605,7 +605,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		 */
 		if (flags & SCX_PICK_IDLE_CORE) {
 			cpu = -EBUSY;
-			goto out_unlock;
+			return cpu;
 		}
 	}
 
@@ -614,7 +614,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	 */
 	if (is_prev_allowed && scx_idle_test_and_clear_cpu(prev_cpu)) {
 		cpu = prev_cpu;
-		goto out_unlock;
+		return cpu;
 	}
 
 	/*
@@ -623,7 +623,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	if (llc_cpus) {
 		cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
 		if (cpu >= 0)
-			goto out_unlock;
+			return cpu;
 	}
 
 	/*
@@ -632,7 +632,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	if (numa_cpus) {
 		cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
 		if (cpu >= 0)
-			goto out_unlock;
+			return cpu;
 	}
 
 	/*
@@ -645,11 +645,6 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	 */
 	cpu = scx_pick_idle_cpu(allowed, node, flags);
 
-out_unlock:
-	rcu_read_unlock();
-out_enable:
-	preempt_enable();
-
 	return cpu;
 }
 
-- 
2.43.0
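For reference, below is a minimal userspace sketch of the scope-based cleanup
idea this patch relies on. The kernel's guard(preempt)()/guard(rcu)() helpers
come from <linux/cleanup.h> and expand to a local variable with a
compiler-invoked destructor (__attribute__((cleanup))); everything else in the
sketch (FAKE_GUARD, fake_disable()/fake_enable(), pick_cpu()) is hypothetical
and only illustrates the mechanism, it is not kernel API.

/* Build with: gcc -Wall -o guard_demo guard_demo.c */
#include <stdio.h>

/* Stand-ins for preempt_disable()/preempt_enable(); a real guard would
 * enter and leave an actual critical section here. */
static void fake_disable(void) { puts("  disable"); }
static void fake_enable(void)  { puts("  enable"); }

/* Cleanup callback the compiler runs when the guard variable goes out of
 * scope, on every exit path (early return included). */
static void guard_exit(int *unused)
{
	(void)unused;
	fake_enable();
}

/* Hypothetical stand-in for guard(preempt)(): the initializer "takes the
 * lock", the cleanup attribute "releases" it at end of scope. */
#define FAKE_GUARD() \
	int fake_guard __attribute__((cleanup(guard_exit))) = (fake_disable(), 0)

/* Toy version of the control flow in scx_select_cpu_dfl(): early returns
 * no longer need goto/unlock labels because cleanup runs automatically. */
static int pick_cpu(int idle_cpu)
{
	FAKE_GUARD();

	if (idle_cpu >= 0)
		return idle_cpu;	/* early return: fake_enable() still runs */

	return -1;			/* busy path: also released */
}

int main(void)
{
	printf("pick_cpu(3)  -> %d\n", pick_cpu(3));
	printf("pick_cpu(-1) -> %d\n", pick_cpu(-1));
	return 0;
}

This mirrors the point of the patch: once release happens automatically at
scope exit, every error path can simply return, and the out_unlock/out_enable
labels become unnecessary.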