pick_next_task_dl() and build_sched_domain() aren't used outside deadline.c and topology.c, respectively.
Make them static.
Signed-off-by: Viresh Kumar viresh.kumar@linaro.org --- kernel/sched/deadline.c | 2 +- kernel/sched/topology.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index a2ce59015642..ce9679a633f5 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1168,7 +1168,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, return rb_entry(left, struct sched_dl_entity, rb_node); }
-struct task_struct * +static struct task_struct * pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { struct sched_dl_entity *dl_se; diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 1b0b4fb12837..09a56ca76bd1 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1351,7 +1351,7 @@ static void __sdt_free(const struct cpumask *cpu_map) } }
-struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, +static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, struct sched_domain_attr *attr, struct sched_domain *child, int cpu) {
Most of the sched domain structure gets initialized from sd_init(), so it looks reasonable to initialize span_weight from there as well.
Currently it is getting initialized from build_sched_domains(), which doesn't look like the ideal place for doing so.
With this change we need to additionally reset span_weight for a special error case, but that looks reasonable as span_weight must be updated every time domain span is updated.
Signed-off-by: Viresh Kumar viresh.kumar@linaro.org --- kernel/sched/topology.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 09a56ca76bd1..691b290a679e 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -892,6 +892,7 @@ sd_init(struct sched_domain_topology_level *tl,
.last_balance = jiffies, .balance_interval = sd_weight, + .span_weight = sd_weight, .smt_gain = 0, .max_newidle_lb_cost = 0, .next_decay_max_lb_cost = jiffies, @@ -1373,6 +1374,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve cpumask_or(sched_domain_span(sd), sched_domain_span(sd), sched_domain_span(child)); + sd->span_weight = cpumask_weight(sched_domain_span(sd)); }
} @@ -1417,7 +1419,6 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att /* Build the groups for the domains */ for_each_cpu(i, cpu_map) { for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { - sd->span_weight = cpumask_weight(sched_domain_span(sd)); if (sd->flags & SD_OVERLAP) { if (build_overlap_sched_groups(sd, i)) goto error;
linaro-kernel@lists.linaro.org