Function select_energy_cpu_brute() can handle the case 'next_cpu == prev_cpu' first and bail out early; this avoids a large amount of code indentation for the remaining 'next_cpu != prev_cpu' path.
Change-Id: Iac08b5f1e6072805c0bee22680bd0ecab411cdba Signed-off-by: Leo Yan <leo.yan@linaro.org> --- kernel/sched/fair.c | 91 +++++++++++++++++++++++++---------------------------- 1 file changed, 42 insertions(+), 49 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0f0b307..b67999a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7060,6 +7060,8 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync int target_cpu; int backup_cpu; int next_cpu; + int delta = 0; + struct energy_env eenv;
schedstat_inc(p->se.statistics.nr_wakeups_secb_attempts); schedstat_inc(this_rq()->eas_stats.secb_attempts); @@ -7108,63 +7110,54 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync }
target_cpu = prev_cpu; - if (next_cpu != prev_cpu) { - int delta = 0; - struct energy_env eenv = { - .p = p, - .util_delta = task_util(p), - /* Task's previous CPU candidate */ - .cpu[EAS_CPU_PRV] = { - .cpu_id = prev_cpu, - }, - /* Main alternative CPU candidate */ - .cpu[EAS_CPU_NXT] = { - .cpu_id = next_cpu, - }, - /* Backup alternative CPU candidate */ - .cpu[EAS_CPU_BKP] = { - .cpu_id = backup_cpu, - }, - }; + if (next_cpu == prev_cpu) { + schedstat_inc(p->se.statistics.nr_wakeups_secb_count); + schedstat_inc(this_rq()->eas_stats.secb_count); + goto unlock; + } + + eenv.p = p; + eenv.util_delta = task_util(p); + /* Task's previous CPU candidate */ + eenv.cpu[EAS_CPU_PRV].cpu_id = prev_cpu; + /* Main alternative CPU candidate */ + eenv.cpu[EAS_CPU_NXT].cpu_id = next_cpu; + /* Backup alternative CPU candidate */ + eenv.cpu[EAS_CPU_BKP].cpu_id = backup_cpu;
#ifdef CONFIG_SCHED_WALT - if (!walt_disabled && sysctl_sched_use_walt_cpu_util && - p->state == TASK_WAKING) - delta = task_util(p); + if (!walt_disabled && sysctl_sched_use_walt_cpu_util && + p->state == TASK_WAKING) + delta = task_util(p); #endif - /* Not enough spare capacity on previous cpu */ - if (__cpu_overutilized(prev_cpu, delta)) { - schedstat_inc(p->se.statistics.nr_wakeups_secb_insuff_cap); - schedstat_inc(this_rq()->eas_stats.secb_insuff_cap); - target_cpu = next_cpu; - goto unlock; - } - - cpumask_clear(&eenv.cpus_mask); - if (prev_cpu >= 0) - cpumask_set_cpu(prev_cpu, &eenv.cpus_mask); - if (next_cpu >= 0) - cpumask_set_cpu(next_cpu, &eenv.cpus_mask); - if (backup_cpu >= 0) - cpumask_set_cpu(backup_cpu, &eenv.cpus_mask); + /* Not enough spare capacity on previous cpu */ + if (__cpu_overutilized(prev_cpu, delta)) { + schedstat_inc(p->se.statistics.nr_wakeups_secb_insuff_cap); + schedstat_inc(this_rq()->eas_stats.secb_insuff_cap); + target_cpu = next_cpu; + goto unlock; + }
- select_energy_cpu_idx(&eenv); + cpumask_clear(&eenv.cpus_mask); + if (prev_cpu >= 0) + cpumask_set_cpu(prev_cpu, &eenv.cpus_mask); + if (next_cpu >= 0) + cpumask_set_cpu(next_cpu, &eenv.cpus_mask); + if (backup_cpu >= 0) + cpumask_set_cpu(backup_cpu, &eenv.cpus_mask);
- /* Check if EAS_CPU_NXT is a more energy efficient CPU */ - if (eenv.next_idx != EAS_CPU_PRV) { - schedstat_inc(p->se.statistics.nr_wakeups_secb_nrg_sav); - schedstat_inc(this_rq()->eas_stats.secb_nrg_sav); - } else { - schedstat_inc(p->se.statistics.nr_wakeups_secb_no_nrg_sav); - schedstat_inc(this_rq()->eas_stats.secb_no_nrg_sav); - } + select_energy_cpu_idx(&eenv);
- target_cpu = eenv.cpu[eenv.next_idx].cpu_id; - goto unlock; + /* Check if EAS_CPU_NXT is a more energy efficient CPU */ + if (eenv.next_idx != EAS_CPU_PRV) { + schedstat_inc(p->se.statistics.nr_wakeups_secb_nrg_sav); + schedstat_inc(this_rq()->eas_stats.secb_nrg_sav); + } else { + schedstat_inc(p->se.statistics.nr_wakeups_secb_no_nrg_sav); + schedstat_inc(this_rq()->eas_stats.secb_no_nrg_sav); }
- schedstat_inc(p->se.statistics.nr_wakeups_secb_count); - schedstat_inc(this_rq()->eas_stats.secb_count); + target_cpu = eenv.cpu[eenv.next_idx].cpu_id;
unlock: rcu_read_unlock(); -- 1.9.1