Check whether some of the CPUs in the group were actually idle. If the balancing was triggered by a fork and all CPUs are loaded, try to run in the same group.
Signed-off-by: Lukasz Luba <l.luba@partner.samsung.com> --- kernel/sched/fair.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d6c9e4b41330..0ceebc12bb4d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6214,6 +6214,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, { struct sched_group *idlest = NULL, *group = sd->groups; struct sched_group *most_spare_sg = NULL; + struct sched_group *group_with_idle = NULL; unsigned long min_runnable_load = ULONG_MAX; unsigned long this_runnable_load = ULONG_MAX; unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX; @@ -6222,6 +6223,8 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int imbalance_scale = 100 + (sd->imbalance_pct-100)/2; unsigned long imbalance = scale_load_down(NICE_0_LOAD) * (sd->imbalance_pct-100) / 100; + bool found_local_idle = false; + int found_idle_cpu = -1;
if (sd_flag & SD_BALANCE_WAKE) load_idx = sd->wake_idx; @@ -6263,6 +6266,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
if (spare_cap > max_spare_cap) max_spare_cap = spare_cap; + + /* If there is an idle CPU, try it */ + if (idle_cpu(i)) { + if (found_local_idle) + continue; + if (local_group) + found_local_idle = true; + + found_idle_cpu = i; + group_with_idle = group; + } }
/* Adjust by relative CPU capacity of the group */ @@ -6313,7 +6327,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, * utilization. */ if (sd_flag & SD_BALANCE_FORK) - goto skip_spare; + goto try_skip_packing;
if (this_spare > task_util(p) / 2 && imbalance_scale*this_spare > 100*most_spare) @@ -6322,7 +6336,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, if (most_spare > task_util(p) / 2) return most_spare_sg;
-skip_spare: +try_skip_packing: + if (found_idle_cpu != -1) + return group_with_idle; + if (!idlest) return NULL;
@@ -6333,6 +6350,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, (100*this_avg_load < imbalance_scale*min_avg_load)) return NULL;
+ /* Last try: all CPUs are loaded, so keep continue on current */ + if (found_idle_cpu == -1 && sd_flag & SD_BALANCE_FORK) + return NULL; + return idlest; }
@@ -6414,6 +6435,8 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p }
new_cpu = find_idlest_group_cpu(group, p, cpu); + trace_sched_find_idlest_cpu(sched_group_span(group), cpu, + new_cpu); if (new_cpu == cpu) { /* Now try balancing at a lower domain level of cpu */ sd = sd->child;