[PATCH 2/3] sched: introduce per-cpu var next_cpu to track search limit

From: subhra mazumdar
Date: Mon Apr 23 2018 - 20:39:35 EST


Introduce a per-cpu variable to track the limit up to which the idle CPU
search was done in select_idle_cpu(). This allows the next search to start
from that point. This is necessary for rotating the search window over the entire
LLC domain.

Signed-off-by: subhra mazumdar <subhra.mazumdar@xxxxxxxxxx>
---
kernel/sched/core.c | 2 ++
kernel/sched/sched.h | 1 +
2 files changed, 3 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5e10aae..cd5c08d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -17,6 +17,7 @@
#include <trace/events/sched.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DEFINE_PER_CPU_SHARED_ALIGNED(int, next_cpu);

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
/*
@@ -6018,6 +6019,7 @@ void __init sched_init(void)
struct rq *rq;

rq = cpu_rq(i);
+ per_cpu(next_cpu, i) = -1;
raw_spin_lock_init(&rq->lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3f1874c..a2db041 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -902,6 +902,7 @@ extern struct static_key_false sched_smt_present;
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DECLARE_PER_CPU_SHARED_ALIGNED(int, next_cpu);

#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() this_cpu_ptr(&runqueues)
--
2.9.3