Most of the sched domain structure is initialized from sd_init(), and it looks reasonable to initialize span_weight there as well.
Currently it is initialized from build_sched_domains(), which doesn't look like the ideal place for doing so.
With this change we additionally need to update span_weight for one special error case, where the domain's span is widened to cover its child's span, but that looks reasonable: span_weight must be refreshed every time the domain span is updated.
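For illustration only (this sketch is not part of the patch and uses made-up names): a minimal user-space model of the invariant the change relies on, namely that the cached span_weight must equal the number of CPUs in the domain's span both when the span is first set and whenever it is later widened, e.g. by the error-case fixup that ORs in a child's span.

#include <stdio.h>
#include <stdint.h>

/* Toy stand-in for struct sched_domain: a CPU mask plus its cached weight. */
struct toy_domain {
	uint64_t span;			/* bit i set => CPU i is in the domain */
	unsigned int span_weight;	/* cached popcount of 'span' */
};

/* Set span and span_weight together, mirroring what sd_init() now does. */
static void toy_domain_init(struct toy_domain *d, uint64_t span)
{
	d->span = span;
	d->span_weight = (unsigned int)__builtin_popcountll(span);
}

/*
 * Widen the span (e.g. to absorb a child's CPUs) and refresh the cached
 * weight, mirroring the fixup added to build_sched_domain().
 */
static void toy_domain_extend(struct toy_domain *d, uint64_t extra)
{
	d->span |= extra;
	d->span_weight = (unsigned int)__builtin_popcountll(d->span);
}

int main(void)
{
	struct toy_domain d;

	toy_domain_init(&d, 0x0f);	/* CPUs 0-3 */
	printf("weight after init:   %u\n", d.span_weight);	/* prints 4 */

	toy_domain_extend(&d, 0xf0);	/* add CPUs 4-7 */
	printf("weight after extend: %u\n", d.span_weight);	/* prints 8 */

	return 0;
}

The point is only that the weight is recomputed at every place the span can change, which is what the patch does by setting it in sd_init() and again in the fixup path, instead of once per rebuild in build_sched_domains().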
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 kernel/sched/topology.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 09a56ca76bd1..691b290a679e 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -892,6 +892,7 @@ sd_init(struct sched_domain_topology_level *tl,
 
 		.last_balance		= jiffies,
 		.balance_interval	= sd_weight,
+		.span_weight		= sd_weight,
 		.smt_gain		= 0,
 		.max_newidle_lb_cost	= 0,
 		.next_decay_max_lb_cost	= jiffies,
@@ -1373,6 +1374,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
 			cpumask_or(sched_domain_span(sd),
 				   sched_domain_span(sd),
 				   sched_domain_span(child));
+			sd->span_weight = cpumask_weight(sched_domain_span(sd));
 		}
 
 	}
@@ -1417,7 +1419,6 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 	/* Build the groups for the domains */
 	for_each_cpu(i, cpu_map) {
 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
-			sd->span_weight = cpumask_weight(sched_domain_span(sd));
 			if (sd->flags & SD_OVERLAP) {
 				if (build_overlap_sched_groups(sd, i))
 					goto error;