sched/power: recalculate thermal zone power only when a cooling weight changed

Track in the sched power worker whether any CPU cooling device weight was
actually updated while draining the per-CPU requests, and trigger
thermal_all_zones_recalc_power() once after the loop only in that case.
Also move the frequent-change filter in sched_power_update() before the
sched_power pointer is fetched, so filtered-out requests return early
without doing needless work.
Signed-off-by: Lukasz Luba <l.luba@partner.samsung.com> --- kernel/sched/power.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/power.c b/kernel/sched/power.c index c2fc0811bf37..0dcb4579b474 100644 --- a/kernel/sched/power.c +++ b/kernel/sched/power.c @@ -131,20 +131,28 @@ static void sched_power_work(struct kthread_work *work) int i; struct cpu_power *cpower = NULL; struct power_request req; + unsigned int w; + bool need_update = false;
for_each_online_cpu(i) { cpower = (&per_cpu(cpu_power, i)); raw_spin_lock(&cpower->update_lock); + w = cpower->weight; req = cpower->req; cpower->req.time = 0; + cpower->weight = req.weight; raw_spin_unlock(&cpower->update_lock);
if (should_update_next_weight(req.time)) { pr_info("cpower req poped\n"); thermal_cpu_cdev_set_weight(req.cpu, req.weight); + need_update = true; } }
+ if (need_update) + thermal_all_zones_recalc_power(); + sp->work_in_progress = false; }
@@ -167,12 +175,12 @@ static void sched_power_update(struct update_sched_power *update, int cpu, if (!cpower->operating) return;
- sp = cpower->sched_power; - - /* Filter to frequent changes */ + /* Filter out too frequent or unneeded changes */ if (!should_update_next_weight(time)) return;
+ sp = cpower->sched_power; + raw_spin_lock(&cpower->update_lock); cpower->req.weight = weight; cpower->req.cpu = cpu;