Re: [PATCH v5 2/3] sched/topology: Rework CPU capacity asymmetry detection

From: Beata Michalska
Date: Wed May 26 2021 - 17:43:15 EST


On Wed, May 26, 2021 at 08:17:25PM +0200, Dietmar Eggemann wrote:
> On 26/05/2021 14:15, Beata Michalska wrote:
> > On Wed, May 26, 2021 at 11:52:25AM +0200, Dietmar Eggemann wrote:
> >> On 25/05/2021 12:29, Beata Michalska wrote:
> >>> On Tue, May 25, 2021 at 10:53:07AM +0100, Valentin Schneider wrote:
> >>>> On 24/05/21 23:55, Beata Michalska wrote:
> >>>>> On Mon, May 24, 2021 at 07:01:04PM +0100, Valentin Schneider wrote:
> >>>>>> On 24/05/21 11:16, Beata Michalska wrote:
>
> [...]
>
> >> static inline int
> >> asym_cpu_capacity_classify(struct sched_domain *sd,
> >>                            const struct cpumask *cpu_map)
> >> {
> >>         int sd_span_match = 0, cpu_map_match = 0, flags = 0;
> >>         struct asym_cap_data *entry;
> >>
> >>         list_for_each_entry(entry, &asym_cap_list, link) {
> >>                 if (cpumask_intersects(sched_domain_span(sd), entry->cpu_mask))
> >>                         ++sd_span_match;
> >>                 else if (cpumask_intersects(cpu_map, entry->cpu_mask))
> >>                         ++cpu_map_match;
> >>         }
> >>
> >>         WARN_ON_ONCE(!sd_span_match);
> >>
> >>         if (sd_span_match > 1) {
> >>                 flags |= SD_ASYM_CPUCAPACITY;
> >>                 if (!cpu_map_match)
> >>                         flags |= SD_ASYM_CPUCAPACITY_FULL;
> >>         }
> >>
> >>         return flags;
> >> }
> > So I planned to drop the list_is_singular check anyway, as it is not
> > really needed. Otherwise, I am not really convinced by the suggestion.
> > I could add comments around the current version to make it more
> > 'digestible', but I'd rather stay with it as it seems more compact to
> > me (subjective).
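
(For context: the check referred to above is the early bail-out at the top
of the helper, see the diff quoted at the end of this mail. A rough sketch
of what gets dropped, assuming the v5 shape of the function:)

        if (list_is_singular(&asym_cap_list))
                goto leave;

(With a single capacity value on the list the loop can only ever reach
asym_cap_count == 1, so the final `return asym_cap_count > 1 ?
sd_asym_flags : 0;` yields 0 anyway and the early exit buys nothing.)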
>
> You could pass in `const struct cpumask *sd_span` instead of `struct
> sched_domain *sd` though, to make it clear that both masks are compared
> against the cpumasks of the asym_cap_list entries.
>
I could definitely do that, though if I switch to arrays for the CPU masks,
it might get a bit confusing again.
No strong preference here though. Can do either or both.
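
To illustrate the 'arrays' idea (purely hypothetical sketch, the names
below are made up for the example and are not part of the series): the
per-capacity masks could live in a flat array instead of asym_cap_list,
with the classify loop becoming something like:

        /* Hypothetical: one mask per unique CPU capacity value. */
        static cpumask_var_t asym_cap_masks[MAX_CAP_LEVELS];
        static int asym_cap_levels;

        /* ... and inside the classify helper: */
        for (i = 0; i < asym_cap_levels; i++) {
                if (cpumask_intersects(sd_span, asym_cap_masks[i]))
                        ++asym_cap_count;
                else if (cpumask_intersects(cpu_map, asym_cap_masks[i]))
                        sd_asym_flags &= ~SD_ASYM_CPUCAPACITY_FULL;
        }

At which point sd_span, cpu_map and the array entries are all plain
cpumasks, which is where the 'confusing again' part comes in.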

Thanks.

---
BR
B.
>  static inline int
> -asym_cpu_capacity_classify(struct sched_domain *sd,
> +asym_cpu_capacity_classify(const struct cpumask *sd_span,
>                             const struct cpumask *cpu_map)
>  {
>          int sd_asym_flags = SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
> @@ -1377,14 +1378,14 @@ asym_cpu_capacity_classify(struct sched_domain *sd,
>                  goto leave;
>
>          list_for_each_entry(entry, &asym_cap_list, link) {
> -                if (cpumask_intersects(sched_domain_span(sd), entry->cpu_mask)) {
> +                if (cpumask_intersects(sd_span, entry->cpu_mask)) {
>                          ++asym_cap_count;
>                  } else {
>                          /*
>                           * CPUs with given capacity might be offline
>                           * so make sure this is not the case
>                           */
> -                        if (cpumask_intersects(entry->cpu_mask, cpu_map)) {
> +                        if (cpumask_intersects(cpu_map, entry->cpu_mask)) {
>                                  sd_asym_flags &= ~SD_ASYM_CPUCAPACITY_FULL;
>                                  if (asym_cap_count > 1)
>                                          break;
> @@ -1395,7 +1396,6 @@ asym_cpu_capacity_classify(struct sched_domain *sd,
>  leave:
>          return asym_cap_count > 1 ? sd_asym_flags : 0;
>  }
> -#endif
>
>  static inline struct asym_cap_data *
>  asym_cpu_capacity_get_data(unsigned long capacity)
> @@ -1589,6 +1589,7 @@ sd_init(struct sched_domain_topology_level *tl,
>          struct sd_data *sdd = &tl->data;
>          struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
>          int sd_id, sd_weight, sd_flags = 0;
> +        struct cpumask *sd_span;
>
>  #ifdef CONFIG_NUMA
>          /*
> @@ -1636,10 +1637,11 @@ sd_init(struct sched_domain_topology_level *tl,
>  #endif
>          };
>
> -        cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
> -        sd_id = cpumask_first(sched_domain_span(sd));
> +        sd_span = sched_domain_span(sd);
> +        cpumask_and(sd_span, cpu_map, tl->mask(cpu));
> +        sd_id = cpumask_first(sd_span);
>
> -        sd->flags |= asym_cpu_capacity_classify(sd, cpu_map);
> +        sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
>          /*
>           * Convert topological properties into behaviour.
>           */