In the task wakeup path, select an idle CPU as backup for the following two cases:
- If the cluster has an idle CPU but none of its CPUs can meet the task's capacity requirement, then it obviously should fall back to using an idle CPU;
- If the CPU is not staying at its lowest OPP, tasks should be spread as much as possible to give more chance to decrease the OPP and to avoid overly long scheduling latency caused by packing tasks onto the same CPU.
Signed-off-by: Leo Yan leo.yan@linaro.org --- kernel/sched/fair.c | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6e7279c..9370b5b 100755 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6317,12 +6317,13 @@ static inline int find_idlest_target(struct task_struct *p, bool boosted, static int energy_aware_select_candidate_cpu(struct task_struct *p, struct sched_group *sg) { - int i, cpu = -1; + int i, cpu = -1, best_idle_cpu = -1; int cap_idx = INT_MAX, idx; unsigned long task_util_boosted, new_util, wake_util; struct sched_domain *sd; const struct sched_group_energy *sge; int prev_cpu = task_cpu(p); + unsigned long best_idle_cpu_util = ULONG_MAX;
task_util_boosted = boosted_task_util(p);
@@ -6338,6 +6339,14 @@ static int energy_aware_select_candidate_cpu(struct task_struct *p, continue;
/* + * Find an idle CPU as backup and bias towards the most recently idled one + */ + if (idle_cpu(i) && wake_util < best_idle_cpu_util) { + best_idle_cpu = i; + best_idle_cpu_util = wake_util; + } + + /* * p's blocked utilization is still accounted for on prev_cpu * so prev_cpu will receive a negative bias due to the double * accounting. However, the blocked utilization may be zero. @@ -6352,6 +6361,7 @@ static int energy_aware_select_candidate_cpu(struct task_struct *p, if (new_util > capacity_orig_of(i)) continue;
+ /* * According to the woken-up task and CPU utilization, predict * the CPU OPP. So select the CPU with two criteria from the power @@ -6379,11 +6389,28 @@ static int energy_aware_select_candidate_cpu(struct task_struct *p,
/* Keep previous CPU and pack tasks if possible */ if (i == prev_cpu || - wake_util > cpu_rq(cpu)->cfs.avg.util_waken_avg) + wake_util > cpu_rq(cpu)->cfs.avg.util_waken_avg) { cpu = i; + } } }
+ /* return directly if no idle CPU has been found */ + if (best_idle_cpu == -1) + return cpu; + + /* + * Fall back to an idle CPU in two cases: + * + * - No proper target CPU has been found but one idle CPU is available; + * - The target CPU is likely to increase its OPP after the task is + * migrated onto it, but one backup idle CPU is available; + */ + if (cpu == -1) + cpu = best_idle_cpu; + else if (!idle_cpu(cpu) && cap_idx > 0) + cpu = best_idle_cpu; + return cpu; }
-- 1.9.1