[RFC][PATCH 05/14] sched/fair: Change update_load_avg() arguments

From: Peter Zijlstra
Date: Fri May 12 2017 - 13:22:32 EST


Most call sites of update_load_avg() already have cfs_rq_of(se)
available; pass it down instead of recomputing it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
kernel/sched/fair.c | 31 +++++++++++++++----------------
1 file changed, 15 insertions(+), 16 deletions(-)
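
[Not part of the patch itself.] Below is a minimal, standalone sketch of
the pattern the changelog describes, using made-up names (struct entity,
struct runqueue, rq_of_entity(), update_avg()) rather than the scheduler's
real types: the helper used to re-derive its container from the entity on
every call; after the change, callers that already hold the container pass
it down.

/*
 * Illustrative stand-ins only -- not kernel code.
 */
#include <stdio.h>

struct runqueue;

struct entity {
	struct runqueue *my_rq;	/* back-pointer, analogous to cfs_rq_of(se) */
	long load;
};

struct runqueue {
	long load_sum;
};

/* Stand-in for cfs_rq_of(se): derive the container from the entity. */
static struct runqueue *rq_of_entity(struct entity *e)
{
	return e->my_rq;
}

/* Before: the helper recomputes the runqueue from the entity. */
static void update_avg_old(struct entity *e)
{
	struct runqueue *rq = rq_of_entity(e);

	rq->load_sum += e->load;
}

/* After: the caller, which already holds the runqueue, passes it in. */
static void update_avg_new(struct runqueue *rq, struct entity *e)
{
	rq->load_sum += e->load;
}

int main(void)
{
	struct runqueue rq = { .load_sum = 0 };
	struct entity se = { .my_rq = &rq, .load = 1024 };

	update_avg_old(&se);		/* old-style call */
	update_avg_new(&rq, &se);	/* new-style call: rq passed down */

	printf("load_sum = %ld\n", rq.load_sum);
	return 0;
}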

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3349,9 +3349,8 @@ update_cfs_rq_load_avg(u64 now, struct c
#define SKIP_AGE_LOAD 0x2

/* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int flags)
+static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 now = cfs_rq_clock_task(cfs_rq);
struct rq *rq = rq_of(cfs_rq);
int cpu = cpu_of(rq);
@@ -3512,9 +3511,9 @@ update_cfs_rq_load_avg(u64 now, struct c
#define UPDATE_TG 0x0
#define SKIP_AGE_LOAD 0x0

-static inline void update_load_avg(struct sched_entity *se, int not_used1)
+static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
{
- cpufreq_update_util(rq_of(cfs_rq_of(se)), 0);
+ cpufreq_update_util(rq_of(cfs_rq), 0);
}

static inline void
@@ -3665,7 +3664,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
* its group cfs_rq
* - Add its new weight to cfs_rq->load.weight
*/
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
enqueue_entity_load_avg(cfs_rq, se);
update_cfs_shares(se);
account_entity_enqueue(cfs_rq, se);
@@ -3749,7 +3748,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
* - For group entity, update its weight to reflect the new share
* of its group cfs_rq.
*/
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
dequeue_entity_load_avg(cfs_rq, se);

update_stats_dequeue(cfs_rq, se, flags);
@@ -3837,7 +3836,7 @@ set_next_entity(struct cfs_rq *cfs_rq, s
*/
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
}

update_stats_curr_start(cfs_rq, se);
@@ -3939,7 +3938,7 @@ static void put_prev_entity(struct cfs_r
/* Put 'current' back into the tree. */
__enqueue_entity(cfs_rq, prev);
/* in !on_rq case, update occurred at dequeue */
- update_load_avg(prev, 0);
+ update_load_avg(cfs_rq, prev, 0);
}
cfs_rq->curr = NULL;
}
@@ -3955,7 +3954,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
/*
* Ensure that runnable average is periodically updated.
*/
- update_load_avg(curr, UPDATE_TG);
+ update_load_avg(cfs_rq, curr, UPDATE_TG);
update_cfs_shares(curr);

#ifdef CONFIG_SCHED_HRTICK
@@ -4873,7 +4872,7 @@ enqueue_task_fair(struct rq *rq, struct
if (cfs_rq_throttled(cfs_rq))
break;

- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
update_cfs_shares(se);
}

@@ -4932,7 +4931,7 @@ static void dequeue_task_fair(struct rq
if (cfs_rq_throttled(cfs_rq))
break;

- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
update_cfs_shares(se);
}

@@ -7030,7 +7029,7 @@ static void update_blocked_averages(int
/* Propagate pending load changes to the parent, if any: */
se = cfs_rq->tg->se[cpu];
if (se && !skip_blocked_update(se))
- update_load_avg(se, 0);
+ update_load_avg(cfs_rq_of(se), se, 0);

/*
* There can be a lot of idle CPU cgroups. Don't let fully
@@ -9159,7 +9158,7 @@ static void propagate_entity_cfs_rq(stru
if (cfs_rq_throttled(cfs_rq))
break;

- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
}
}
#else
@@ -9171,7 +9170,7 @@ static void detach_entity_cfs_rq(struct
struct cfs_rq *cfs_rq = cfs_rq_of(se);

/* Catch up with the cfs_rq and remove our load when we leave */
- update_load_avg(se, 0);
+ update_load_avg(cfs_rq, se, 0);
detach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, false);
propagate_entity_cfs_rq(se);
@@ -9190,7 +9189,7 @@ static void attach_entity_cfs_rq(struct
#endif

/* Synchronize entity with its cfs_rq */
- update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
+ update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
attach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, false);
propagate_entity_cfs_rq(se);
@@ -9474,7 +9473,7 @@ int sched_group_set_shares(struct task_g
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
for_each_sched_entity(se) {
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
update_cfs_shares(se);
}
rq_unlock_irqrestore(rq, &rf);