* Ingo Molnar <mingo@xxxxxxx> wrote:
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- s64 key;
+ u64 key;
/*
* Are we enqueueing a waiting task? (for current tasks
@@ -442,26 +448,7 @@ static void update_stats_enqueue(struct
/*
* Update the key:
*/
- key = cfs_rq->fair_clock;
-
- /*
- * Optimize the common nice 0 case:
- */
- if (likely(se->load.weight == NICE_0_LOAD)) {
- key -= se->wait_runtime;
- } else {
- u64 tmp;
-
- if (se->wait_runtime < 0) {
- tmp = -se->wait_runtime;
- key += (tmp * se->load.inv_weight) >>
- (WMULT_SHIFT - NICE_0_SHIFT);
- } else {
- tmp = se->wait_runtime;
- key -= (tmp * se->load.inv_weight) >>
- (WMULT_SHIFT - NICE_0_SHIFT);
- }
- }
+ key = se->exec_runtime;
se->fair_key = key;
}
@@ -583,6 +570,20 @@ static void __enqueue_sleeper(struct cfs
cfs_rq->sleeper_bonus += delta_fair;
}
+/*
+ * Newly woken tasks are put into the "middle" of all runnable
+ * tasks' current runtime:
+ */
+static u64 avg_exec_runtime(struct cfs_rq *cfs_rq)
+{
+ u64 avg_exec_runtime = cfs_rq->exec_runtime;
+
+ if (cfs_rq->nr_running)
+ do_div(avg_exec_runtime, cfs_rq->nr_running);
+
+ return avg_exec_runtime;
+}
+
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct task_struct *tsk = task_of(se);
@@ -640,8 +641,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
*/
update_curr(cfs_rq);
- if (wakeup)
+ if (wakeup) {
+ se->exec_runtime = avg_exec_runtime(cfs_rq);
enqueue_sleeper(cfs_rq, se);
+ }
update_stats_enqueue(cfs_rq, se);
__enqueue_entity(cfs_rq, se);
@@ -1126,6 +1129,7 @@ static void task_new_fair(struct rq *rq,
schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}
+ se->exec_runtime = avg_exec_runtime(cfs_rq);
__enqueue_entity(cfs_rq, se);
}