[PATCH v1 04/10] sched/fair: Rename variable names for sched averages

From: Yuyang Du
Date: Wed Aug 10 2016 - 14:40:30 EST


The names of the sched averages (including load_avg and util_avg) have
been changed, and new ones added, over the past couple of years; some
of them are confusing, especially to people reading the code for the
first time. This patch attempts to make the names more self-explanatory
and updates some of the comments as well. For reference, a small
userspace sketch of the decay math behind these names is included
below the sign-off.

The renames are listed as follows:

- init_entity_runnable_average() to init_entity_sched_avg()

- post_init_entity_util_avg() to post_init_entity_sched_avg()

- update_load_avg() to update_sched_avg()

- enqueue_entity_load_avg() to enqueue_entity_sched_avg()

- dequeue_entity_load_avg() to dequeue_entity_sched_avg()

- detach_entity_load_avg() to detach_entity_sched_avg()

- attach_entity_load_avg() to attach_entity_sched_avg()

- remove_entity_load_avg() to remove_entity_sched_avg()

- LOAD_AVG_PERIOD to SCHED_AVG_HALFLIFE

- LOAD_AVG_MAX_N to SCHED_AVG_MAX_N

- LOAD_AVG_MAX to SCHED_AVG_MAX

- runnable_avg_yN_sum[] to __accumulated_sum_N[]

- runnable_avg_yN_inv[] to __decay_inv_multiply_N[]

- __compute_runnable_contrib() to __accumulate_sum()

- decay_load() to __decay_sum()

Signed-off-by: Yuyang Du <yuyang.du@xxxxxxxxx>
---
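For reference only, not part of the patch: a minimal userspace sketch of
the math these names describe, under the same assumptions stated in the
patch (a period of 1024*1024ns and a half-life of 32 periods). It uses
floating point for clarity; the kernel instead uses the fixed-point tables
__decay_inv_multiply_N[] and __accumulated_sum_N[], so the sum printed
here lands near, but not exactly at, SCHED_AVG_MAX (47742). The file name
is made up; build with something like "cc pelt_sketch.c -lm".

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* y is chosen so that y^32 = 0.5, i.e. a half-life of 32 periods */
	const double y = pow(0.5, 1.0 / 32);
	double sum = 0.0;
	int n;

	/* 1024*(1 + y + y^2 + ...): converges towards the SCHED_AVG_MAX bound */
	for (n = 0; n < 347; n++)	/* 347 corresponds to SCHED_AVG_MAX_N */
		sum = sum * y + 1024;
	printf("geometric sum after %d periods: %.0f\n", n, sum);

	/*
	 * Constant-time decay, as done by __decay_sum():
	 * y^n = (1/2)^(n/32) * y^(n%32), i.e. a shift plus a table lookup.
	 */
	n = 100;
	printf("y^%d direct: %.6f  split: %.6f\n", n,
	       pow(y, n), pow(0.5, n / 32) * pow(y, n % 32));

	return 0;
}
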
include/linux/sched.h | 2 +-
kernel/sched/core.c | 4 +-
kernel/sched/fair.c | 275 ++++++++++++++++++++++++-------------------------
kernel/sched/sched.h | 6 +-
4 files changed, 143 insertions(+), 144 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 67323aa..912830c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1229,7 +1229,7 @@ struct load_weight {

/*
* The load_avg/util_avg accumulates an infinite geometric series
- * (see __update_load_avg() in kernel/sched/fair.c).
+ * (see __update_sched_avg() in kernel/sched/fair.c).
*
* [load_avg definition]
*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5c883fe..30d429b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2383,7 +2383,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->sched_class = &fair_sched_class;
}

- init_entity_runnable_average(&p->se);
+ init_entity_sched_avg(&p->se);

/*
* The child is not yet in the pid-hash so no cgroup attach races,
@@ -2545,7 +2545,7 @@ void wake_up_new_task(struct task_struct *p)
__set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
#endif
rq = __task_rq_lock(p, &rf);
- post_init_entity_util_avg(&p->se);
+ post_init_entity_sched_avg(&p->se);

activate_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index dad1ba5..06819bb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -660,16 +660,18 @@ static int select_idle_sibling(struct task_struct *p, int cpu);
static unsigned long task_h_load(struct task_struct *p);

/*
- * We choose a half-life close to 1 scheduling period.
- * Note: The tables runnable_avg_yN_inv and runnable_avg_yN_sum are
- * dependent on this value.
+ * Note: everything in sched average calculation, including
+ * __decay_inv_multiply_N, __accumulated_sum_N, __accumulated_sum_N32,
+ * SCHED_AVG_MAX, and SCHED_AVG_MAX_N, depends on, and only on,
+ * (1) exponential decay, (2) a period of 1024*1024ns (~1ms), and (3)
+ * a half-life of 32 periods.
*/
-#define LOAD_AVG_PERIOD 32
-#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
-#define LOAD_AVG_MAX_N 347 /* number of full periods to produce LOAD_AVG_MAX */
+#define SCHED_AVG_HALFLIFE 32 /* number of periods as a half-life */
+#define SCHED_AVG_MAX 47742 /* maximum possible sched avg */
+#define SCHED_AVG_MAX_N 347 /* number of full periods to produce SCHED_AVG_MAX */

/* Give new sched_entity start runnable values to heavy its load in infant time */
-void init_entity_runnable_average(struct sched_entity *se)
+void init_entity_sched_avg(struct sched_entity *se)
{
struct sched_avg *sa = &se->avg;

@@ -681,7 +683,7 @@ void init_entity_runnable_average(struct sched_entity *se)
*/
sa->period_contrib = 1023;
sa->load_avg = scale_load_down(se->load.weight);
- sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
+ sa->load_sum = sa->load_avg * SCHED_AVG_MAX;
/*
* At this point, util_avg won't be used in select_task_rq_fair anyway
*/
@@ -691,9 +693,9 @@ void init_entity_runnable_average(struct sched_entity *se)
}

static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
-static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
+static int update_cfs_rq_sched_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
+static void attach_entity_sched_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);

/*
* With new tasks being created, their initial util_avgs are extrapolated
@@ -720,7 +722,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
* Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
* if util_avg > util_avg_cap.
*/
-void post_init_entity_util_avg(struct sched_entity *se)
+void post_init_entity_sched_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct sched_avg *sa = &se->avg;
@@ -738,7 +740,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
} else {
sa->util_avg = cap;
}
- sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
+ sa->util_sum = sa->util_avg * SCHED_AVG_MAX;
}

if (entity_is_task(se)) {
@@ -747,8 +749,8 @@ void post_init_entity_util_avg(struct sched_entity *se)
/*
* For !fair tasks do:
*
- update_cfs_rq_load_avg(now, cfs_rq, false);
- attach_entity_load_avg(cfs_rq, se);
+ update_cfs_rq_sched_avg(now, cfs_rq, false);
+ attach_entity_sched_avg(cfs_rq, se);
switched_from_fair(rq, p);
*
* such that the next switched_to_fair() has the
@@ -759,22 +761,16 @@ void post_init_entity_util_avg(struct sched_entity *se)
}
}

- tg_update = update_cfs_rq_load_avg(now, cfs_rq, false);
- attach_entity_load_avg(cfs_rq, se);
+ tg_update = update_cfs_rq_sched_avg(now, cfs_rq, false);
+ attach_entity_sched_avg(cfs_rq, se);
if (tg_update)
update_tg_load_avg(cfs_rq, false);
}

#else /* !CONFIG_SMP */
-void init_entity_runnable_average(struct sched_entity *se)
-{
-}
-void post_init_entity_util_avg(struct sched_entity *se)
-{
-}
-static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
-{
-}
+void init_entity_sched_avg(struct sched_entity *se) { }
+void post_init_entity_sched_avg(struct sched_entity *se) { }
+static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) { }
#endif /* CONFIG_SMP */

/*
@@ -1839,7 +1835,7 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
*period = now - p->last_task_numa_placement;
} else {
delta = p->se.avg.load_sum / p->se.load.weight;
- *period = LOAD_AVG_MAX;
+ *period = SCHED_AVG_MAX;
}

p->last_sum_exec_runtime = runtime;
@@ -2583,7 +2579,7 @@ static inline void update_cfs_shares(struct cfs_rq *cfs_rq)

#ifdef CONFIG_SMP
/* Precomputed fixed inverse multiplies for multiplication by y^n */
-static const u32 runnable_avg_yN_inv[] = {
+static const u32 __decay_inv_multiply_N[] = {
0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
@@ -2596,7 +2592,7 @@ static const u32 runnable_avg_yN_inv[] = {
* Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
* over-estimates when re-combining.
*/
-static const u32 runnable_avg_yN_sum[] = {
+static const u32 __accumulated_sum_N[] = {
0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
@@ -2613,93 +2609,95 @@ static const u32 __accumulated_sum_N32[] = {
};

/*
- * Approximate:
- * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
+ * val * y^n, where y^m ~= 0.5
+ *
+ * n is the number of past periods; a period is ~1ms
+ * m is the half-life of the exponential decay; here it is SCHED_AVG_HALFLIFE=32.
*/
-static __always_inline u64 decay_load(u64 val, u64 n)
+static __always_inline u64 __decay_sum(u64 val, u64 n)
{
unsigned int local_n;

if (!n)
return val;
- else if (unlikely(n > LOAD_AVG_PERIOD * 63))
+ else if (unlikely(n > SCHED_AVG_HALFLIFE * 63))
return 0;

/* after bounds checking we can collapse to 32-bit */
local_n = n;

/*
- * As y^PERIOD = 1/2, we can combine
- * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
- * With a look-up table which covers y^n (n<PERIOD)
+ * As y^HALFLIFE = 1/2, we can combine
+ * y^n = 1/2^(n/HALFLIFE) * y^(n%HALFLIFE)
+ * With a look-up table which covers y^n (n<HALFLIFE)
*
- * To achieve constant time decay_load.
+ * To achieve constant time __decay_sum.
*/
- if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
- val >>= local_n / LOAD_AVG_PERIOD;
- local_n %= LOAD_AVG_PERIOD;
+ if (unlikely(local_n >= SCHED_AVG_HALFLIFE)) {
+ val >>= local_n / SCHED_AVG_HALFLIFE;
+ local_n %= SCHED_AVG_HALFLIFE;
}

- val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
+ val = mul_u64_u32_shr(val, __decay_inv_multiply_N[local_n], 32);
return val;
}

/*
- * For updates fully spanning n periods, the contribution to runnable
- * average will be: \Sum 1024*y^n
+ * For updates fully spanning n periods, the accumulated contribution
+ * will be: \Sum 1024*y^n.
*
- * We can compute this reasonably efficiently by combining:
- * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
+ * We can compute this efficiently by combining:
+ * y^32 = 1/2 with precomputed \Sum 1024*y^n (where n < 32)
*/
-static u32 __compute_runnable_contrib(u64 n)
+static u32 __accumulate_sum(u64 n)
{
u32 contrib = 0;

- if (likely(n <= LOAD_AVG_PERIOD))
- return runnable_avg_yN_sum[n];
- else if (unlikely(n >= LOAD_AVG_MAX_N))
- return LOAD_AVG_MAX;
+ if (likely(n <= SCHED_AVG_HALFLIFE))
+ return __accumulated_sum_N[n];
+ else if (unlikely(n >= SCHED_AVG_MAX_N))
+ return SCHED_AVG_MAX;

- /* Since n < LOAD_AVG_MAX_N, n/LOAD_AVG_PERIOD < 11 */
- contrib = __accumulated_sum_N32[n/LOAD_AVG_PERIOD];
- n %= LOAD_AVG_PERIOD;
- contrib = decay_load(contrib, n);
- return contrib + runnable_avg_yN_sum[n];
+ /* Since n < SCHED_AVG_MAX_N, n/SCHED_AVG_HALFLIFE < 11 */
+ contrib = __accumulated_sum_N32[n/SCHED_AVG_HALFLIFE];
+ n %= SCHED_AVG_HALFLIFE;
+ contrib = __decay_sum(contrib, n);
+ return contrib + __accumulated_sum_N[n];
}

#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

/*
- * We can represent the historical contribution to runnable average as the
- * coefficients of a geometric series. To do this we sub-divide our runnable
- * history into segments of approximately 1ms (1024us); label the segment that
- * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
+ * We can represent the historical contribution to sched average as the
+ * coefficients of a geometric series. To do this we divide the history
+ * into segments of approximately 1ms (1024*1024ns); label the segment that
+ * occurred N-1024us ago p_N, with p_0 corresponding to the current period, e.g.
*
* [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
* p0 p1 p2
* (now) (~1ms ago) (~2ms ago)
*
- * Let u_i denote the fraction of p_i that the entity was runnable.
+ * Let u_i denote the fraction of p_i whose state (runnable/running) we count.
*
* We then designate the fractions u_i as our co-efficients, yielding the
- * following representation of historical load:
+ * following representation of a sched metric:
* u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
*
- * We choose y based on the with of a reasonably scheduling period, fixing:
- * y^32 = 0.5
+ * We choose y based on a half-life of 32 periods (which is ~32ms):
+ * y^32 = 0.5 => y = (0.5)^(1/32)
*
- * This means that the contribution to load ~32ms ago (u_32) will be weighted
- * approximately half as much as the contribution to load within the last ms
- * (u_0).
+ * where 32 is the number of periods after which a past period's
+ * contribution is halved: a period that lies another ~32ms further in
+ * the past is weighted only half as much.
*
* When a period "rolls over" and we have new u_0`, multiplying the previous
* sum again by y is sufficient to update:
- * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
- * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
+ * avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
+ * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
*/
static __always_inline int
-__update_load_avg(u64 now, int cpu, struct sched_avg *sa,
- unsigned long weight, int running, struct cfs_rq *cfs_rq)
+__update_sched_avg(u64 now, int cpu, struct sched_avg *sa,
+ unsigned long weight, int running, struct cfs_rq *cfs_rq)
{
u64 delta, scaled_delta, periods;
u32 contrib;
@@ -2759,15 +2757,15 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
periods = delta / 1024;
delta %= 1024;

- sa->load_sum = decay_load(sa->load_sum, periods + 1);
+ sa->load_sum = __decay_sum(sa->load_sum, periods + 1);
if (cfs_rq) {
cfs_rq->runnable_load_sum =
- decay_load(cfs_rq->runnable_load_sum, periods + 1);
+ __decay_sum(cfs_rq->runnable_load_sum, periods + 1);
}
- sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1);
+ sa->util_sum = __decay_sum((u64)(sa->util_sum), periods + 1);

/* Efficiently calculate \sum (1..n_period) 1024*y^i */
- contrib = __compute_runnable_contrib(periods);
+ contrib = __accumulate_sum(periods);
contrib = cap_scale(contrib, scale_freq);
if (weight) {
sa->load_sum += weight * contrib;
@@ -2791,12 +2789,12 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
sa->period_contrib += delta;

if (decayed) {
- sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
+ sa->load_avg = div_u64(sa->load_sum, SCHED_AVG_MAX);
if (cfs_rq) {
cfs_rq->runnable_load_avg =
- div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
+ div_u64(cfs_rq->runnable_load_sum, SCHED_AVG_MAX);
}
- sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
+ sa->util_avg = sa->util_sum / SCHED_AVG_MAX;
}

return decayed;
@@ -2864,8 +2862,8 @@ void set_task_rq_fair(struct sched_entity *se,
p_last_update_time = prev->avg.last_update_time;
n_last_update_time = next->avg.last_update_time;
#endif
- __update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
- &se->avg, 0, 0, NULL);
+ __update_sched_avg(p_last_update_time, cpu_of(rq_of(prev)),
+ &se->avg, 0, 0, NULL);
se->avg.last_update_time = n_last_update_time;
}
}
@@ -2920,14 +2918,14 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
} while (0)

/**
- * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
+ * update_cfs_rq_sched_avg - update the cfs_rq's load/util averages
* @now: current time, as per cfs_rq_clock_task()
* @cfs_rq: cfs_rq to update
* @update_freq: should we call cfs_rq_util_change() or will the call do so
*
* The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
* avg. The immediate corollary is that all (fair) tasks must be attached, see
- * post_init_entity_util_avg().
+ * post_init_entity_sched_avg().
*
* cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
*
@@ -2937,7 +2935,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
* avg up.
*/
static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+update_cfs_rq_sched_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
{
struct sched_avg *sa = &cfs_rq->avg;
int decayed, removed_load = 0, removed_util = 0;
@@ -2945,18 +2943,18 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
if (atomic_long_read(&cfs_rq->removed_load_avg)) {
s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
sub_positive(&sa->load_avg, r);
- sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
+ sub_positive(&sa->load_sum, r * SCHED_AVG_MAX);
removed_load = 1;
}

if (atomic_long_read(&cfs_rq->removed_util_avg)) {
long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
sub_positive(&sa->util_avg, r);
- sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
+ sub_positive(&sa->util_sum, r * SCHED_AVG_MAX);
removed_util = 1;
}

- decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
+ decayed = __update_sched_avg(now, cpu_of(rq_of(cfs_rq)), sa,
scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);

#ifndef CONFIG_64BIT
@@ -2971,7 +2969,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
}

/* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int update_tg)
+static inline void update_sched_avg(struct sched_entity *se, int update_tg)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 now = cfs_rq_clock_task(cfs_rq);
@@ -2982,23 +2980,23 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
* Track task load average for carrying it to new CPU after migrated, and
* track group sched_entity load average for task_h_load calc in migration
*/
- __update_load_avg(now, cpu, &se->avg,
- se->on_rq * scale_load_down(se->load.weight),
- cfs_rq->curr == se, NULL);
+ __update_sched_avg(now, cpu, &se->avg,
+ se->on_rq * scale_load_down(se->load.weight),
+ cfs_rq->curr == se, NULL);

- if (update_cfs_rq_load_avg(now, cfs_rq, true) && update_tg)
+ if (update_cfs_rq_sched_avg(now, cfs_rq, true) && update_tg)
update_tg_load_avg(cfs_rq, 0);
}

/**
- * attach_entity_load_avg - attach this entity to its cfs_rq load avg
+ * attach_entity_sched_avg - attach this entity to its cfs_rq load avg
* @cfs_rq: cfs_rq to attach to
* @se: sched_entity to attach
*
- * Must call update_cfs_rq_load_avg() before this, since we rely on
+ * Must call update_cfs_rq_sched_avg() before this, since we rely on
* cfs_rq->avg.last_update_time being current.
*/
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void attach_entity_sched_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (!sched_feat(ATTACH_AGE_LOAD))
goto skip_aging;
@@ -3007,11 +3005,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
* If we got migrated (either between CPUs or between cgroups) we'll
* have aged the average right before clearing @last_update_time.
*
- * Or we're fresh through post_init_entity_util_avg().
+ * Or we're fresh through post_init_entity_sched_avg().
*/
if (se->avg.last_update_time) {
- __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
- &se->avg, 0, 0, NULL);
+ __update_sched_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
+ &se->avg, 0, 0, NULL);

/*
* XXX: we could have just aged the entire load away if we've been
@@ -3030,18 +3028,18 @@ skip_aging:
}

/**
- * detach_entity_load_avg - detach this entity from its cfs_rq load avg
+ * detach_entity_sched_avg - detach this entity from its cfs_rq load avg
* @cfs_rq: cfs_rq to detach from
* @se: sched_entity to detach
*
- * Must call update_cfs_rq_load_avg() before this, since we rely on
+ * Must call update_cfs_rq_sched_avg() before this, since we rely on
* cfs_rq->avg.last_update_time being current.
*/
-static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void detach_entity_sched_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
- &se->avg, se->on_rq * scale_load_down(se->load.weight),
- cfs_rq->curr == se, NULL);
+ __update_sched_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
+ &se->avg, se->on_rq * scale_load_down(se->load.weight),
+ cfs_rq->curr == se, NULL);

sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
@@ -3053,7 +3051,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s

/* Add the load generated by se into cfs_rq's load average */
static inline void
-enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+enqueue_entity_sched_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct sched_avg *sa = &se->avg;
u64 now = cfs_rq_clock_task(cfs_rq);
@@ -3061,18 +3059,18 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)

migrated = !sa->last_update_time;
if (!migrated) {
- __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
- se->on_rq * scale_load_down(se->load.weight),
- cfs_rq->curr == se, NULL);
+ __update_sched_avg(now, cpu_of(rq_of(cfs_rq)), sa,
+ se->on_rq * scale_load_down(se->load.weight),
+ cfs_rq->curr == se, NULL);
}

- decayed = update_cfs_rq_load_avg(now, cfs_rq, !migrated);
+ decayed = update_cfs_rq_sched_avg(now, cfs_rq, !migrated);

cfs_rq->runnable_load_avg += sa->load_avg;
cfs_rq->runnable_load_sum += sa->load_sum;

if (migrated)
- attach_entity_load_avg(cfs_rq, se);
+ attach_entity_sched_avg(cfs_rq, se);

if (decayed || migrated)
update_tg_load_avg(cfs_rq, 0);
@@ -3080,9 +3078,9 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)

/* Remove the runnable load generated by se from cfs_rq's runnable load average */
static inline void
-dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+dequeue_entity_sched_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- update_load_avg(se, 1);
+ update_sched_avg(se, 1);

cfs_rq->runnable_load_avg =
max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
@@ -3115,24 +3113,25 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
* Task first catches up with cfs_rq, and then subtract
* itself from the cfs_rq (task must be off the queue now).
*/
-static void remove_entity_load_avg(struct sched_entity *se)
+static void remove_entity_sched_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 last_update_time;

/*
* tasks cannot exit without having gone through wake_up_new_task() ->
- * post_init_entity_util_avg() which will have added things to the
+ * post_init_entity_sched_avg() which will have added things to the
* cfs_rq, so we can remove unconditionally.
*
* Similarly for groups, they will have passed through
- * post_init_entity_util_avg() before unregister_sched_fair_group()
+ * post_init_entity_sched_avg() before unregister_sched_fair_group()
* calls this.
*/

last_update_time = cfs_rq_last_update_time(cfs_rq);

- __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
+ __update_sched_avg(last_update_time, cpu_of(rq_of(cfs_rq)),
+ &se->avg, 0, 0, NULL);
atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
}
@@ -3152,12 +3151,12 @@ static int idle_balance(struct rq *this_rq);
#else /* CONFIG_SMP */

static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+update_cfs_rq_sched_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
{
return 0;
}

-static inline void update_load_avg(struct sched_entity *se, int not_used)
+static inline void update_sched_avg(struct sched_entity *se, int not_used)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct rq *rq = rq_of(cfs_rq);
@@ -3166,15 +3165,15 @@ static inline void update_load_avg(struct sched_entity *se, int not_used)
}

static inline void
-enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+enqueue_entity_sched_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
static inline void
-dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
-static inline void remove_entity_load_avg(struct sched_entity *se) {}
+dequeue_entity_sched_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void remove_entity_sched_avg(struct sched_entity *se) {}

static inline void
-attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+attach_entity_sched_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
static inline void
-detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+detach_entity_sched_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}

static inline int idle_balance(struct rq *rq)
{
@@ -3367,7 +3366,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;

- enqueue_entity_load_avg(cfs_rq, se);
+ enqueue_entity_sched_avg(cfs_rq, se);
account_entity_enqueue(cfs_rq, se);
update_cfs_shares(cfs_rq);

@@ -3446,7 +3445,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
- dequeue_entity_load_avg(cfs_rq, se);
+ dequeue_entity_sched_avg(cfs_rq, se);

if (schedstat_enabled())
update_stats_dequeue(cfs_rq, se, flags);
@@ -3526,7 +3525,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (schedstat_enabled())
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
- update_load_avg(se, 1);
+ update_sched_avg(se, 1);
}

update_stats_curr_start(cfs_rq, se);
@@ -3630,7 +3629,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
/* Put 'current' back into the tree. */
__enqueue_entity(cfs_rq, prev);
/* in !on_rq case, update occurred at dequeue */
- update_load_avg(prev, 0);
+ update_sched_avg(prev, 0);
}
cfs_rq->curr = NULL;
}
@@ -3646,7 +3645,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
/*
* Ensure that runnable average is periodically updated.
*/
- update_load_avg(curr, 1);
+ update_sched_avg(curr, 1);
update_cfs_shares(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
@@ -4535,7 +4534,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;

- update_load_avg(se, 1);
+ update_sched_avg(se, 1);
update_cfs_shares(cfs_rq);
}

@@ -4594,7 +4593,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;

- update_load_avg(se, 1);
+ update_sched_avg(se, 1);
update_cfs_shares(cfs_rq);
}

@@ -5496,7 +5495,7 @@ static void migrate_task_rq_fair(struct task_struct *p)
* will result in the wakee task is less decayed, but giving the wakee more
* load sounds not bad.
*/
- remove_entity_load_avg(&p->se);
+ remove_entity_sched_avg(&p->se);

/* Tell new CPU we are migrated */
p->se.avg.last_update_time = 0;
@@ -5507,7 +5506,7 @@ static void migrate_task_rq_fair(struct task_struct *p)

static void task_dead_fair(struct task_struct *p)
{
- remove_entity_load_avg(&p->se);
+ remove_entity_sched_avg(&p->se);
}
#endif /* CONFIG_SMP */

@@ -6388,7 +6387,7 @@ static void update_blocked_averages(int cpu)
if (throttled_hierarchy(cfs_rq))
continue;

- if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
+ if (update_cfs_rq_sched_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
update_tg_load_avg(cfs_rq, 0);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -6449,7 +6448,7 @@ static inline void update_blocked_averages(int cpu)

raw_spin_lock_irqsave(&rq->lock, flags);
update_rq_clock(rq);
- update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
+ update_cfs_rq_sched_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}

@@ -8453,8 +8452,8 @@ static void detach_task_cfs_rq(struct task_struct *p)
}

/* Catch up with the cfs_rq and remove our load when we leave */
- tg_update = update_cfs_rq_load_avg(now, cfs_rq, false);
- detach_entity_load_avg(cfs_rq, se);
+ tg_update = update_cfs_rq_sched_avg(now, cfs_rq, false);
+ detach_entity_sched_avg(cfs_rq, se);
if (tg_update)
update_tg_load_avg(cfs_rq, false);
}
@@ -8475,8 +8474,8 @@ static void attach_task_cfs_rq(struct task_struct *p)
#endif

/* Synchronize task with its cfs_rq */
- tg_update = update_cfs_rq_load_avg(now, cfs_rq, false);
- attach_entity_load_avg(cfs_rq, se);
+ tg_update = update_cfs_rq_sched_avg(now, cfs_rq, false);
+ attach_entity_sched_avg(cfs_rq, se);
if (tg_update)
update_tg_load_avg(cfs_rq, false);

@@ -8621,7 +8620,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)

init_cfs_rq(cfs_rq);
init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
- init_entity_runnable_average(se);
+ init_entity_sched_avg(se);
}

return 1;
@@ -8643,7 +8642,7 @@ void online_fair_sched_group(struct task_group *tg)
se = tg->se[i];

raw_spin_lock_irq(&rq->lock);
- post_init_entity_util_avg(se);
+ post_init_entity_sched_avg(se);
sync_throttle(tg, i);
raw_spin_unlock_irq(&rq->lock);
}
@@ -8657,7 +8656,7 @@ void unregister_fair_sched_group(struct task_group *tg)

for_each_possible_cpu(cpu) {
if (tg->se[cpu])
- remove_entity_load_avg(tg->se[cpu]);
+ remove_entity_sched_avg(tg->se[cpu]);

/*
* Only empty task groups can be destroyed; so we can speculatively
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c64fc51..132a0fa 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1325,8 +1325,8 @@ extern void init_dl_task_timer(struct sched_dl_entity *dl_se);

unsigned long to_ratio(u64 period, u64 runtime);

-extern void init_entity_runnable_average(struct sched_entity *se);
-extern void post_init_entity_util_avg(struct sched_entity *se);
+extern void init_entity_sched_avg(struct sched_entity *se);
+extern void post_init_entity_sched_avg(struct sched_entity *se);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);
@@ -1768,7 +1768,7 @@ DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
* @max: Utilization ceiling.
*
* This function is called by the scheduler on every invocation of
- * update_load_avg() on the CPU whose utilization is being updated.
+ * update_sched_avg() on the CPU whose utilization is being updated.
*
* It can only be called from RCU-sched read-side critical sections.
*/
--
1.7.9.5