[PATCH v3 1/6] sched/topology: Introduce sched_group::flags

From: Ricardo Neri
Date: Thu May 13 2021 - 11:50:30 EST


There exist situations in which the load balancer needs to know the
properties of the CPUs in a scheduling group. When using asymmetric
packing, for instance, it needs to know not only the state of dst_cpu
but also that of its SMT siblings, if any.

Use the flags of the child scheduling domains to initialize the
scheduling group flags; the group flags then reflect the properties of
the CPUs in the group.
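
As an illustration (not part of this patch), a balancer-side check
could then use the new field to tell whether a group is an SMT core;
sched_group_is_smt() below is a hypothetical helper name:

	/* The child domain of an SMT group has SD_SHARE_CPUCAPACITY set. */
	static bool sched_group_is_smt(struct sched_group *sg)
	{
		return sg->flags & SD_SHARE_CPUCAPACITY;
	}

A check like this lets the load balancer inspect the topology level of
a group directly instead of walking the sibling masks of its CPUs.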

Cc: Aubrey Li <aubrey.li@xxxxxxxxx>
Cc: Ben Segall <bsegall@xxxxxxxxxx>
Cc: Daniel Bristot de Oliveira <bristot@xxxxxxxxxx>
Cc: Dietmar Eggemann <dietmar.eggemann@xxxxxxx>
Cc: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Quentin Perret <qperret@xxxxxxxxxx>
Cc: Srinivas Pandruvada <srinivas.pandruvada@xxxxxxxxxxxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
Originally-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@xxxxxxxxxxxxxxx>
---
Changes since v2:
* Introduced this patch.

Changes since v1:
* N/A
---
 kernel/sched/sched.h    | 1 +
 kernel/sched/topology.c | 7 +++++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8f0194cee0ba..1afd82c1743d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1795,6 +1795,7 @@ struct sched_group {
 	unsigned int		group_weight;
 	struct sched_group_capacity *sgc;
 	int			asym_prefer_cpu;	/* CPU of highest priority in group */
+	int			flags;
 
 	/*
 	 * The CPUs this group covers.
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 053115b55f89..1caab1a5e5c8 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -916,10 +916,12 @@ build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
 		return NULL;
 
 	sg_span = sched_group_span(sg);
-	if (sd->child)
+	if (sd->child) {
 		cpumask_copy(sg_span, sched_domain_span(sd->child));
-	else
+		sg->flags = sd->child->flags;
+	} else {
 		cpumask_copy(sg_span, sched_domain_span(sd));
+	}
 
 	atomic_inc(&sg->ref);
 	return sg;
@@ -1169,6 +1171,7 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 	if (child) {
 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
+		sg->flags = child->flags;
 	} else {
 		cpumask_set_cpu(cpu, sched_group_span(sg));
 		cpumask_set_cpu(cpu, group_balance_mask(sg));
--
2.17.1