[PATCH 13/19] sched/fair: Handle moving single tasks to/from their preferred LLC

From: Tim Chen

Date: Sat Oct 11 2025 - 14:20:19 EST


If the busiest runqueue has only one task, active balancing may be
invoked to move it. However, before migrating, check whether the task
is already running in its preferred LLC.

Do not move a lone task to another LLC if it would move the task
away from its preferred LLC or cause excessive imbalance between LLCs.

Co-developed-by: Chen Yu <yu.c.chen@xxxxxxxxx>
Signed-off-by: Chen Yu <yu.c.chen@xxxxxxxxx>
Signed-off-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
---
kernel/sched/fair.c | 62 ++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 59 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bec6354d7841..19ba9c1b9a63 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9826,12 +9826,53 @@ static __maybe_unused enum llc_mig can_migrate_llc_task(int src_cpu, int dst_cpu
return can_migrate_llc(src_cpu, dst_cpu, task_util(p), to_pref);
}

+static inline bool
+break_llc_locality(struct lb_env *env)
+{
+ if (!sched_cache_enabled())
+ return false;
+
+ if (cpus_share_cache(env->src_cpu, env->dst_cpu))
+ return false;
+ /*
+ * All tasks prefer to stay on their current CPU.
+ * Do not pull a task from its preferred CPU if:
+ * 1. It is the only task running there; OR
+ * 2. Migrating it away from its preferred LLC would violate
+ * the cache-aware scheduling policy.
+ */
+ if (env->src_rq->nr_pref_llc_running == env->src_rq->cfs.h_nr_runnable) {
+ unsigned long util = 0;
+ struct task_struct *cur;
+
+ if (env->src_rq->nr_running <= 1)
+ return true;
+
+ rcu_read_lock();
+ cur = rcu_dereference(env->src_rq->curr);
+ if (cur)
+ util = task_util(cur);
+ rcu_read_unlock();
+
+ if (can_migrate_llc(env->src_cpu, env->dst_cpu,
+ util, false) == mig_forbid)
+ return true;
+ }
+
+ return false;
+}
#else
static inline bool get_llc_stats(int cpu, unsigned long *util,
unsigned long *cap)
{
return false;
}
+
+static inline bool
+break_llc_locality(struct lb_env *env)
+{
+ return false;
+}
#endif
/*
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
@@ -12247,6 +12288,9 @@ static int need_active_balance(struct lb_env *env)
{
struct sched_domain *sd = env->sd;

+ if (break_llc_locality(env))
+ return 0;
+
if (asym_active_balance(env))
return 1;

@@ -12266,7 +12310,8 @@ static int need_active_balance(struct lb_env *env)
return 1;
}

- if (env->migration_type == migrate_misfit)
+ if (env->migration_type == migrate_misfit ||
+ env->migration_type == migrate_llc_task)
return 1;

return 0;
@@ -12711,9 +12756,20 @@ static int active_load_balance_cpu_stop(void *data)
goto out_unlock;

/* Is there any task to move? */
- if (busiest_rq->nr_running <= 1)
- goto out_unlock;
+ if (busiest_rq->nr_running <= 1) {
+#ifdef CONFIG_SCHED_CACHE
+ int llc = llc_idx(target_cpu);

+ if (!sched_cache_enabled())
+ goto out_unlock;
+
+ if (llc < 0)
+ goto out_unlock;
+ /* don't migrate if no task prefers target */
+ if (busiest_rq->nr_pref_llc[llc] < 1)
+#endif
+ goto out_unlock;
+ }
/*
* This condition is "impossible", if it occurs
* we need to fix it. Originally reported by
--
2.32.0