[RFC patch v3 09/20] sched: Introduce task preferred LLC field
From: Tim Chen
Date: Wed Jun 18 2025 - 14:23:27 EST
With cache aware scheduling enabled, each task is assigned a
preferred LLC id, derived from the preferred LLC of its process.
It is used to quickly identify the LLC domain that the task
prefers to run in, similar to numa_preferred_nid for NUMA
balancing.
Signed-off-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
---
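Not part of the patch, but as a sketch of how a consumer of the
new field might use it: a candidate CPU can be checked against a
task's preference by comparing the per-CPU sd_llc_id that the
scheduler already maintains. The helper below is illustrative
only (the name cpu_in_preferred_llc is made up here), assuming
the -1 sentinel means no preference has been established yet:

/*
 * Illustrative sketch (kernel/sched context assumed, not part of
 * this patch): does @cpu belong to the LLC that @p prefers?
 */
static inline bool cpu_in_preferred_llc(struct task_struct *p, int cpu)
{
	/* -1: no preferred LLC has been established yet */
	if (p->preferred_llc == -1)
		return true;

	return per_cpu(sd_llc_id, cpu) == p->preferred_llc;
}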
 include/linux/sched.h | 1 +
 init/init_task.c      | 3 +++
 kernel/sched/fair.c   | 7 +++++++
 3 files changed, 11 insertions(+)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7ce95a32e9ff..2f1cb7445733 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1404,6 +1404,7 @@ struct task_struct {
 
 #ifdef CONFIG_SCHED_CACHE
 	struct callback_head		cache_work;
+	int				preferred_llc;
 #endif
 
 #ifdef CONFIG_RSEQ
diff --git a/init/init_task.c b/init/init_task.c
index e557f622bd90..5fffbe766f57 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -188,6 +188,9 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
 	.numa_group = NULL,
 	.numa_faults = NULL,
 #endif
+#ifdef CONFIG_SCHED_CACHE
+	.preferred_llc = -1,
+#endif
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	.kasan_depth = 1,
 #endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5549710d95cf..cc804a8c7061 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1267,6 +1267,7 @@ void account_mm_sched(struct rq *rq, struct task_struct *p, s64 delta_exec)
 	struct mm_struct *mm = p->mm;
 	struct mm_sched *pcpu_sched;
 	unsigned long epoch;
+	int mm_sched_llc = -1;
 
 	/*
 	 * init_task and kthreads don't have an mm
@@ -1293,6 +1294,12 @@ void account_mm_sched(struct rq *rq, struct task_struct *p, s64 delta_exec)
 		mm->mm_sched_cpu = -1;
 		pcpu_sched->occ = 0;
 	}
+
+	if (mm->mm_sched_cpu != -1)
+		mm_sched_llc = per_cpu(sd_llc_id, mm->mm_sched_cpu);
+
+	if (p->preferred_llc != mm_sched_llc)
+		p->preferred_llc = mm_sched_llc;
 }
 
 static void task_tick_cache(struct rq *rq, struct task_struct *p)
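For reference (illustration only, not part of the patch):
per_cpu(sd_llc_id, cpu) resolves a CPU to its LLC id, which
mainline sets in update_top_cache_domain() to the first CPU of
the LLC domain's span; cpus_share_cache() compares the same ids.
Under that assumption, two tasks' preferences can be compared
directly; the helper name below is hypothetical:

/*
 * Illustrative sketch: two tasks prefer the same LLC iff both
 * have a valid preference and the LLC ids match -- the same
 * equality that cpus_share_cache() applies to sd_llc_id.
 */
static inline bool tasks_prefer_same_llc(struct task_struct *a,
					 struct task_struct *b)
{
	return a->preferred_llc != -1 &&
	       a->preferred_llc == b->preferred_llc;
}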
--
2.32.0