This commit switches the energy environment structure to per-CPU data so it can be moved off the kernel stack. This avoids potential stack overflow if we add more fields to the energy environment structure for later optimizations.
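
For reference, the general per-CPU pattern relied on here is sketched below. This is an illustrative example only, not part of the patch; the structure and function names (foo_env, foo_select) are placeholders, and it assumes <linux/percpu.h>, <linux/smp.h> and <linux/string.h>:

  /* Hypothetical example of moving a large struct off the stack. */
  struct foo_env {
          int src_cpu;
          int dst_cpu;
  };

  /* One instance per CPU instead of an on-stack variable. */
  static DEFINE_PER_CPU(struct foo_env, foo_env);

  static void foo_select(void)
  {
          /* Borrow this CPU's copy; no stack space is consumed. */
          struct foo_env *env = this_cpu_ptr(&foo_env);

          memset(env, 0, sizeof(*env));
          env->src_cpu = smp_processor_id();
          /* ... */
  }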
Change-Id: I7aed4c972c464ca683828d85b9b8f9311622da55
Signed-off-by: Leo Yan <leo.yan@linaro.org>
---
 kernel/sched/fair.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4d5d900..6dee639 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5453,6 +5453,8 @@ struct energy_env {
 	} cap;
 };
 
+static DEFINE_PER_CPU(struct energy_env, energy_env);
+
 static int cpu_util_wake(int cpu, struct task_struct *p);
 
 /*
@@ -7079,14 +7081,14 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 	if (target_cpu != prev_cpu) {
 		int delta = 0;
-		struct energy_env eenv = {
-			.util_delta = task_util(p),
-			.src_cpu = prev_cpu,
-			.dst_cpu = target_cpu,
-			.task = p,
-			.trg_cpu = target_cpu,
-		};
+		struct energy_env *eenv = this_cpu_ptr(&energy_env);
+		memset(eenv, 0x0, sizeof(*eenv));
+		eenv->util_delta = task_util(p);
+		eenv->task = p;
+		eenv->src_cpu = prev_cpu;
+		eenv->dst_cpu = target_cpu;
+		eenv->trg_cpu = target_cpu;
 
 #ifdef CONFIG_SCHED_WALT
 		if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
@@ -7100,14 +7102,14 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 			goto unlock;
 		}
 
-		if (energy_diff(&eenv) >= 0) {
+		if (energy_diff(eenv) >= 0) {
 			/* No energy saving for target_cpu, try backup */
 			target_cpu = tmp_backup;
-			eenv.dst_cpu = target_cpu;
-			eenv.trg_cpu = target_cpu;
+			eenv->dst_cpu = target_cpu;
+			eenv->trg_cpu = target_cpu;
 			if (tmp_backup < 0 ||
 			    tmp_backup == prev_cpu ||
-			    energy_diff(&eenv) >= 0) {
+			    energy_diff(eenv) >= 0) {
 				schedstat_inc(p->se.statistics.nr_wakeups_secb_no_nrg_sav);
 				schedstat_inc(this_rq()->eas_stats.secb_no_nrg_sav);
 				target_cpu = prev_cpu;
-- 
1.9.1