eenv::sg_cap is used to indicate which sched_group's CPUs are bound to the same clock domain; as a result, the final capacity needs to consider all CPUs in eenv::sg_cap. Currently eenv::sg_cap is set in the middle of the flow for every sched_group energy calculation, but this is not necessary and we can prepare its value ahead of the energy computation.
This patch moves the setting of eenv::sg_cap from compute_energy() to compute_task_energy(), so its value can be prepared ahead of the energy computation; this also avoids setting it repeatedly.
Change-Id: I9e231fdc26bbf61aa09894c9ea0532b3aa2f3f6b Signed-off-by: Leo Yan leo.yan@linaro.org --- kernel/sched/fair.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 213efaf..d7bb3a8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5738,20 +5738,9 @@ static int compute_energy(struct energy_env *eenv, int candidate) cpu_count = cpumask_weight(&visit_cpus);
while (!cpumask_empty(&visit_cpus)) { - struct sched_group *sg_shared_cap = NULL; int cpu = cpumask_first(&visit_cpus); struct sched_domain *sd;
- /* - * Is the group utilization affected by cpus outside this - * sched_group? - * This sd may have groups with cpus which were not present - * when we took visit_cpus. - */ - sd = rcu_dereference(per_cpu(sd_scs, cpu)); - if (sd && sd->parent) - sg_shared_cap = sd->parent->groups; - for_each_domain(cpu, sd) { struct sched_group *sg = sd->groups;
@@ -5760,10 +5749,6 @@ static int compute_energy(struct energy_env *eenv, int candidate) break;
do { - eenv->sg_cap = sg; - if (sg_shared_cap && sg_shared_cap->group_weight >= sg->group_weight) - eenv->sg_cap = sg_shared_cap; - /* * Compute the energy for all the candidate * CPUs in the current visited SG. @@ -5820,8 +5805,9 @@ static int compute_energy(struct energy_env *eenv, int candidate) */ static int compute_task_energy(struct energy_env *eenv, int cpu) { - struct sched_domain *sd; + struct sched_domain *sd, *sd_cap; struct sched_group *sg; + int first_cpu;
sd = rcu_dereference(per_cpu(sd_ea, cpu)); if (!sd) @@ -5834,6 +5820,20 @@ static int compute_task_energy(struct energy_env *eenv, int cpu) continue;
eenv->sg_top = sg; + + first_cpu = cpumask_first(sched_group_cpus(sg)); + + /* + * The CPU capacity sharing attribution is decided by hardware + * design so we can decide the sg_cap value at the beginning + * for a specific CPU. + */ + sd_cap = rcu_dereference(per_cpu(sd_scs, first_cpu)); + if (sd_cap && sd_cap->parent) + eenv->sg_cap = sd_cap->parent->groups; + else + eenv->sg_cap = sd_cap->groups; + /* energy is unscaled to reduce rounding errors */ if (compute_energy(eenv, cpu) == -EINVAL) { eenv->next_cpu = eenv->prev_cpu; -- 1.9.1