On 06-03-18, 11:26, Patrick Bellasi wrote:
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 1f1a73c8b7c9..9d2780c8c80b 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -6743,7 +6743,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
>  	unsigned long target_max_spare_cap = 0;
>  	unsigned long target_util = ULONG_MAX;
>  	unsigned long best_active_util = ULONG_MAX;
> -	unsigned long target_idle_max_spare_cap = 0;
> +	unsigned long best_idle_max_spare_cap = 0;
>  	int best_idle_cstate = INT_MAX;
>  	struct sched_domain *sd;
>  	struct sched_group *sg;
> @@ -6929,15 +6929,25 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
>  			 */
>  			if (idle_cpu(i)) {
>  				int idle_idx = idle_get_state_idx(cpu_rq(i));
> +				int max_spare_cap;
>  
>  				/* Select idle CPU with lower cap_orig */
>  				if (capacity_orig > best_idle_min_cap_orig)
>  					continue;
> -				/* Favor CPUs that won't end up running at a
> +
> +				/*
> +				 * A more energy efficient CPU has been found,
> +				 * let's reset the conditions for the
> +				 * following min_capped_utilization check
> +				 */
> +				best_idle_max_spare_cap = 0;
That looks wrong (I haven't tested it though). You shouldn't be doing
this unconditionally here, as it makes the comparison below useless:
with best_idle_max_spare_cap reset to 0 on every iteration, that check
will always fail and the continue will never be taken.

The best you can do here is:

	if (capacity_orig < best_idle_min_cap_orig)
		best_idle_max_spare_cap = 0;

In which case I feel the original patch I posted looks better somehow.
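FWIW, just to illustrate what I mean, with that conditional reset the
idle-CPU block would read roughly like below. This is only an untested
sketch, reusing the identifiers from the hunk above:

	if (idle_cpu(i)) {
		int idle_idx = idle_get_state_idx(cpu_rq(i));
		int max_spare_cap;

		/* Select idle CPU with lower cap_orig */
		if (capacity_orig > best_idle_min_cap_orig)
			continue;

		/*
		 * Strictly lower cap_orig than the current best idle CPU:
		 * the old best_idle_max_spare_cap was computed against a
		 * bigger CPU, so restart the tracking for this capacity.
		 */
		if (capacity_orig < best_idle_min_cap_orig)
			best_idle_max_spare_cap = 0;

		/*
		 * Favor CPUs that won't end up running at a
		 * high OPP.
		 */
		max_spare_cap = capacity_orig - min_capped_util;
		if (max_spare_cap < best_idle_max_spare_cap)
			continue;

		/* ... c-state check and best idle CPU bookkeeping as in the patch ... */
	}

That way the spare capacity is only ever compared between CPUs with the
same capacity_orig, which I think is what the reset was trying to achieve.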
> +
> +				/*
> +				 * Favor CPUs that won't end up running at a
>  				 * high OPP.
>  				 */
> -				if ((capacity_orig - min_capped_util) <
> -				    target_idle_max_spare_cap)
> +				max_spare_cap = capacity_orig - min_capped_util;
> +				if (max_spare_cap < best_idle_max_spare_cap)
>  					continue;
>  
>  				/*
> @@ -6951,9 +6961,8 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
>  					continue;
>  
>  				/* Keep track of best idle CPU */
> +				best_idle_max_spare_cap = max_spare_cap;
>  				best_idle_min_cap_orig = capacity_orig;
> -				target_idle_max_spare_cap = capacity_orig -
> -							    min_capped_util;
>  				best_idle_cstate = idle_idx;
>  				best_idle_cpu = i;
>  				continue;
> -- 
> 2.15.1
-- 
viresh