The patch changes the 'fitness' check of the destination CPU during migration. The destination CPU can have lite utilization of around ~6%, and in that case the task utilization is neglected (since the src CPU is already overutilized).
Signed-off-by: Lukasz Luba <l.luba@partner.samsung.com> --- kernel/sched/fair.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8ac2ad9a5b8d..d6c9e4b41330 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7616,7 +7616,7 @@ static inline bool check_cpu_spare_capacity(int cpu, unsigned int needed_spare_capacity) { return (capacity_of(cpu) > - (cpu_util(cpu) + (needed_spare_capacity / 2))); + (cpu_util(cpu) + needed_spare_capacity));
}
@@ -9105,6 +9105,12 @@ static struct rq *find_busiest_queue(struct lb_env *env, */ #define MAX_PINNED_INTERVAL 512
+static inline bool check_cpu_lite_util(int cpu) +{ + /* Lite utilization is defined as less than ~6% */ + return (capacity_of(cpu) >> 4 >= cpu_util(cpu)); +} + static inline int need_park_into_spare_capacity(struct lb_env *env) { bool fits_in = check_cpu_spare_capacity(env->dst_cpu, @@ -9115,7 +9121,7 @@ static inline int need_park_into_spare_capacity(struct lb_env *env) env->src_rq->cfs.h_nr_running == 1 && cpu_overutilized(env->src_cpu) && !cpu_overutilized(env->dst_cpu) && - fits_in) { + (fits_in || check_cpu_lite_util(env->dst_cpu))) { ret = 1; } else { ret = 0;