Add a new function, energy_aware_select_candidate_cpu(), which selects a candidate CPU from a scheduling group. The function uses the same logic as before; factoring it out into a dedicated function enables subsequent optimization. Note: the extracted helper initializes its local candidate to -1 rather than task_cpu(p), so the "capacity at higher OPP" fallback branch no longer triggers when the task's current CPU is outside the target group — reviewers should confirm this behavior change is intended.
Signed-off-by: Leo Yan leo.yan@linaro.org --- kernel/sched/fair.c | 73 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 43 insertions(+), 30 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ecc156c..47f6365 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6311,14 +6311,51 @@ static inline int find_idlest_target(struct task_struct *p, bool boosted, return target_cpu; }
+static int energy_aware_select_candidate_cpu(struct task_struct *p, + struct sched_group *sg) +{ + int i, cpu = -1; + unsigned long task_util_boosted, new_util; + + task_util_boosted = boosted_task_util(p); + + /* Find cpu with sufficient capacity */ + for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) { + /* + * p's blocked utilization is still accounted for on prev_cpu + * so prev_cpu will receive a negative bias due to the double + * accounting. However, the blocked utilization may be zero. + */ + new_util = cpu_util(i) + task_util_boosted; + + /* + * Ensure minimum capacity to grant the required boost. + * The target CPU can be already at a capacity level higher + * than the one required to boost the task. + */ + if (new_util > capacity_orig_of(i)) + continue; + + if (new_util < capacity_curr_of(i)) { + cpu = i; + if (cpu_rq(i)->nr_running) + break; + } + + /* cpu has capacity at higher OPP, keep it as fallback */ + if (cpu == task_cpu(p)) + cpu = i; + } + + return cpu; +} + static inline int find_nrg_efficient_target(struct task_struct *p, struct sched_domain *sd) { struct sched_group *sg, *sg_target; int target_max_cap = INT_MAX; - int target_cpu = task_cpu(p); - unsigned long task_util_boosted, new_util; - int i; + int target_cpu = task_cpu(p), cpu;
sg = sd->groups; sg_target = sg; @@ -6346,34 +6383,10 @@ static inline int find_nrg_efficient_target(struct task_struct *p, } } while (sg = sg->next, sg != sd->groups);
- task_util_boosted = boosted_task_util(p); - /* Find cpu with sufficient capacity */ - for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg_target)) { - /* - * p's blocked utilization is still accounted for on prev_cpu - * so prev_cpu will receive a negative bias due to the double - * accounting. However, the blocked utilization may be zero. - */ - new_util = cpu_util(i) + task_util_boosted; - - /* - * Ensure minimum capacity to grant the required boost. - * The target CPU can be already at a capacity level higher - * than the one required to boost the task. - */ - if (new_util > capacity_orig_of(i)) - continue;
- if (new_util < capacity_curr_of(i)) { - target_cpu = i; - if (cpu_rq(i)->nr_running) - break; - } - - /* cpu has capacity at higher OPP, keep it as fallback */ - if (target_cpu == task_cpu(p)) - target_cpu = i; - } + cpu = energy_aware_select_candidate_cpu(p, sg_target); + if (cpu != -1) + target_cpu = cpu;
return target_cpu; } -- 1.9.1