[PATCH 08/10] sched,fair: refactor enqueue/dequeue_entity

From: Rik van Riel
Date: Fri Jun 28 2019 - 16:49:57 EST


Refactor enqueue_entity, dequeue_entity, and update_load_avg in order
to split out the work that, with a flat runqueue, still needs to happen
at every level in the cgroup hierarchy from the work that only needs to
happen once.
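
As a rough sketch of the resulting call pattern on the enqueue side,
abridged from the hunks below (only lines this patch touches are shown;
the comments and the trailing placeholder are illustrative, not part of
the diff):

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		/* per-level work: load tracking and group shares */
		enqueue_entity_groups(cfs_rq, se, flags);
		/* the enqueue itself, only needed once with a flat runqueue */
		enqueue_entity(cfs_rq, se, flags);
		/* remainder of the loop body is unchanged */
	}

dequeue_task_fair() gains the symmetric dequeue_entity_groups() call
right before dequeue_entity().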

No functional changes.

Signed-off-by: Rik van Riel <riel@xxxxxxxxxxx>
---
kernel/sched/fair.c | 64 +++++++++++++++++++++++++++++----------------
1 file changed, 42 insertions(+), 22 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8da2823401ca..a751e7a9b228 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3480,7 +3480,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 #define DO_ATTACH	0x4
 
 /* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+static inline bool update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 	u64 now = cfs_rq_clock_pelt(cfs_rq);
 	int decayed;
@@ -3509,6 +3509,8 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
 	} else if (decayed && (flags & UPDATE_TG))
 		update_tg_load_avg(cfs_rq, 0);
+
+	return decayed;
 }
 
 #ifndef CONFIG_64BIT
@@ -3725,9 +3727,10 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 #define SKIP_AGE_LOAD	0x0
 #define DO_ATTACH	0x0
 
-static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
+static inline bool update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 {
 	cfs_rq_util_change(cfs_rq, 0);
+	return false;
 }
 
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
@@ -3850,6 +3853,24 @@ static inline void check_schedstat_required(void)
  * CPU and an up-to-date min_vruntime on the destination CPU.
  */
 
+static bool
+enqueue_entity_groups(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+{
+	/*
+	 * When enqueuing a sched_entity, we must:
+	 *   - Update loads to have both entity and cfs_rq synced with now.
+	 *   - Add its load to cfs_rq->runnable_avg
+	 *   - For group_entity, update its weight to reflect the new share of
+	 *     its group cfs_rq
+	 *   - Add its new weight to cfs_rq->load.weight
+	 */
+	if (!update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH))
+		return false;
+
+	update_cfs_group(se);
+	return true;
+}
+
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -3874,16 +3895,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (renorm && !curr)
 		se->vruntime += cfs_rq->min_vruntime;
 
-	/*
-	 * When enqueuing a sched_entity, we must:
-	 *   - Update loads to have both entity and cfs_rq synced with now.
-	 *   - Add its load to cfs_rq->runnable_avg
-	 *   - For group_entity, update its weight to reflect the new share of
-	 *     its group cfs_rq
-	 *   - Add its new weight to cfs_rq->load.weight
-	 */
-	update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
-	update_cfs_group(se);
 	enqueue_runnable_load_avg(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 
@@ -3950,14 +3961,9 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
-static void
-dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+static bool
+dequeue_entity_groups(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-	/*
-	 * Update run-time statistics of the 'current'.
-	 */
-	update_curr(cfs_rq);
-
 	/*
 	 * When dequeuing a sched_entity, we must:
 	 *   - Update loads to have both entity and cfs_rq synced with now.
@@ -3966,7 +3972,21 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 *   - For group entity, update its weight to reflect the new share
 	 *     of its group cfs_rq.
 	 */
-	update_load_avg(cfs_rq, se, UPDATE_TG);
+	if (!update_load_avg(cfs_rq, se, UPDATE_TG))
+		return false;
+	update_cfs_group(se);
+
+	return true;
+}
+
+static void
+dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+{
+	/*
+	 * Update run-time statistics of the 'current'.
+	 */
+	update_curr(cfs_rq);
+
 	dequeue_runnable_load_avg(cfs_rq, se);
 
 	update_stats_dequeue(cfs_rq, se, flags);
@@ -3990,8 +4010,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	/* return excess runtime on last dequeue */
 	return_cfs_rq_runtime(cfs_rq);
 
-	update_cfs_group(se);
-
 	/*
 	 * Now advance min_vruntime if @se was the entity holding it back,
 	 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
@@ -5156,6 +5174,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (se->on_rq)
 			break;
 		cfs_rq = cfs_rq_of(se);
+		enqueue_entity_groups(cfs_rq, se, flags);
 		enqueue_entity(cfs_rq, se, flags);
 
 		/*
@@ -5238,6 +5257,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
+		dequeue_entity_groups(cfs_rq, se, flags);
 		dequeue_entity(cfs_rq, se, flags);
 
 		/*
--
2.20.1