In the previous EAS wakeup path, any possible CPU with higher capacity that can meet the task's requirement may be selected. This patch prefers to fall back to the previous CPU where possible, which avoids unnecessary task migration between clusters.
Signed-off-by: Leo Yan leo.yan@linaro.org --- kernel/sched/fair.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index efa516d..724b36c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5718,7 +5718,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync) struct sched_domain *sd; struct sched_group *sg, *sg_target; int target_max_cap = INT_MAX; - int target_cpu = task_cpu(p); + int target_cpu = -1; unsigned long task_util_boosted, new_util; int i;
@@ -5787,10 +5787,18 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync) break; }
- /* cpu has capacity at higher OPP, keep it as fallback */ - if (target_cpu == task_cpu(p)) + /*
+ * cpu has capacity at higher OPP, keep it as fallback;
+ * give the previous CPU more chance to run
+ */
+ if (task_cpu(p) == i || target_cpu == -1) target_cpu = i; } + + /* If no CPU has been selected, fall back to the previous CPU */ + if (target_cpu == -1) + return task_cpu(p); + } else { /* * Find a cpu with sufficient capacity @@ -5807,7 +5815,8 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync) target_cpu = tmp_target; if ((boosted || prefer_idle) && idle_cpu(target_cpu)) return target_cpu; - } + } else + target_cpu = task_cpu(p); }
if (target_cpu != task_cpu(p)) { -- 1.9.1