[RFC PATCH 03/14] sched: aggregate load contributed by task entities on parenting cfs_rq

From: Paul Turner
Date: Wed Feb 01 2012 - 20:46:51 EST


For a given task t, we can compute its contribution to load as:
task_load(t) = runnable_avg(t) * weight(t)

On a parenting cfs_rq we can then aggregate
runnable_load(cfs_rq) = \Sum task_load(t), for all runnable children t

Maintain this sum bottom-up, with task entities adding their contributed load to
the parenting cfs_rq's sum. When a task entity's load changes, we add the same
delta to the maintained sum rather than recomputing it from scratch.

Signed-off-by: Paul Turner <pjt@xxxxxxxxxx>
Signed-off-by: Ben Segall <bsegall@xxxxxxxxxx>
---
include/linux/sched.h | 1 +
kernel/sched/debug.c | 3 +++
kernel/sched/fair.c | 52 +++++++++++++++++++++++++++++++++++++++++++++----
kernel/sched/sched.h | 2 ++
4 files changed, 54 insertions(+), 4 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 91599c8..f2999f0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1159,6 +1159,7 @@ struct load_weight {
 struct sched_avg {
 	u64 runnable_avg_sum, runnable_avg_period;
 	u64 last_runnable_update;
+	unsigned long load_avg_contrib;
 };
 
 #ifdef CONFIG_SCHEDSTATS
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 43e3162..5a55d26 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -93,6 +93,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 	P(se->load.weight);
 	P(se->avg.runnable_avg_sum);
 	P(se->avg.runnable_avg_period);
+	P(se->avg.load_avg_contrib);
 #undef PN
 #undef P
 }
@@ -222,6 +223,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->load_contribution);
 	SEQ_printf(m, "  .%-30s: %d\n", "load_tg",
 			atomic_read(&cfs_rq->tg->load_weight));
+	SEQ_printf(m, "  .%-30s: %lld\n", "runnable_load_avg",
+			cfs_rq->runnable_load_avg);
 #endif
 
 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8fa199f..bcdad5d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1080,20 +1080,64 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 	return decayed;
 }
 
+/* Compute the current contribution to load_avg by se, return any delta */
+static long __update_entity_load_avg_contrib(struct sched_entity *se)
+{
+	long old_contrib = se->avg.load_avg_contrib;
+
+	if (!entity_is_task(se))
+		return 0;
+
+	/* the +1 keeps the divisor non-zero before the first period elapses */
+	se->avg.load_avg_contrib = div64_u64(se->avg.runnable_avg_sum *
+					     se->load.weight,
+					     se->avg.runnable_avg_period + 1);
+
+	return se->avg.load_avg_contrib - old_contrib;
+}
+
 /* Update a sched_entity's runnable average */
 static inline void update_entity_load_avg(struct sched_entity *se)
 {
-	__update_entity_runnable_avg(rq_of(cfs_rq_of(se))->clock_task, &se->avg,
-				     se->on_rq);
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	long contrib_delta;
+
+	if (!__update_entity_runnable_avg(rq_of(cfs_rq)->clock_task, &se->avg,
+					  se->on_rq))
+		return;
+
+	contrib_delta = __update_entity_load_avg_contrib(se);
+	if (se->on_rq)
+		cfs_rq->runnable_load_avg += contrib_delta;
 }
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 {
 	__update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
 }
+
+/* Add the load generated by se into cfs_rq's child load-average */
+static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
+					   struct sched_entity *se)
+{
+	update_entity_load_avg(se);
+	cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
+}
+
+/* Remove se's load from this cfs_rq child load-average */
+static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
+					   struct sched_entity *se)
+{
+	update_entity_load_avg(se);
+	cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
+}
+
 #else
 static inline void update_entity_load_avg(struct sched_entity *se) {}
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
+static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
+					   struct sched_entity *se) {}
+static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
+					   struct sched_entity *se) {}
 #endif

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -1220,7 +1264,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 */
 	update_curr(cfs_rq);
 	update_cfs_load(cfs_rq, 0);
-	update_entity_load_avg(se);
+	enqueue_entity_load_avg(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 	update_cfs_shares(cfs_rq);

@@ -1295,7 +1339,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	update_entity_load_avg(se);
+	dequeue_entity_load_avg(cfs_rq, se);
 
 	update_stats_dequeue(cfs_rq, se);
 	if (flags & DEQUEUE_SLEEP) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 42b6df6..77a3427 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -270,6 +270,8 @@ struct cfs_rq {
 	u64 load_stamp, load_last, load_unacc_exec_time;
 
 	unsigned long load_contribution;
+
+	u64 runnable_load_avg;
 #endif /* CONFIG_SMP */
 #ifdef CONFIG_CFS_BANDWIDTH
 	int runtime_enabled;

