This reverts commit 3a123bbbb10d54dbdde6ccbbd519c74c91ba2f52.
The reverted commit dropped the update_freq parameter of update_cfs_rq_load_avg(). That parameter is needed again so that callers can control whether cpufreq is notified about a frequency update when a cfs_rq's utilization is updated.
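To illustrate the restored convention (a sketch only, not code added by this patch; the variables are assumed from surrounding context): a caller whose follow-up call raises the cpufreq notification itself passes update_freq == false, everything else passes true. The attach sequence described in the post_init_entity_util_avg() comment restored below is one such case:

	u64 now = cfs_rq_clock_task(cfs_rq);

	/*
	 * attach_entity_load_avg() calls cfs_rq_util_change() itself,
	 * so pass update_freq == false here to avoid notifying cpufreq
	 * twice for the same update:
	 */
	update_cfs_rq_load_avg(now, cfs_rq, false);
	attach_entity_load_avg(cfs_rq, se);

	/* Ordinary paths pass true and notify cpufreq iff the avg decayed: */
	update_cfs_rq_load_avg(now, cfs_rq, true);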
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Joel Fernandes <joelaf@google.com>
---
 kernel/sched/fair.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 56f343b8e749..f97693fe8b6e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -784,7 +784,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
 			/*
 			 * For !fair tasks do:
 			 *
-			update_cfs_rq_load_avg(now, cfs_rq);
+			update_cfs_rq_load_avg(now, cfs_rq, false);
 			attach_entity_load_avg(cfs_rq, se);
 			switched_from_fair(rq, p);
 			 *
@@ -3596,6 +3596,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
  * @now: current time, as per cfs_rq_clock_task()
  * @cfs_rq: cfs_rq to update
+ * @update_freq: should we call cfs_rq_util_change() or will the call do so
  *
  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
  * avg. The immediate corollary is that all (fair) tasks must be attached, see
@@ -3609,7 +3610,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
  * call update_tg_load_avg() when this function returns true.
  */
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
 	unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0;
 	struct sched_avg *sa = &cfs_rq->avg;
@@ -3646,7 +3647,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	if (decayed)
+	if (update_freq && decayed)
 		cfs_rq_util_change(cfs_rq);
 
 	return decayed;
@@ -3740,7 +3741,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
 		__update_load_avg_se(now, cpu, cfs_rq, se);
 
-	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
+	decayed  = update_cfs_rq_load_avg(now, cfs_rq, true);
 	decayed |= propagate_entity_load_avg(se);
 
 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
@@ -3830,7 +3831,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
 #else /* CONFIG_SMP */
 
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
 	return 0;
 }
@@ -7318,7 +7319,7 @@ static void update_blocked_averages(int cpu)
 		if (throttled_hierarchy(cfs_rq))
 			continue;
 
-		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
 			update_tg_load_avg(cfs_rq, 0);
 
 		/* Propagate pending load changes to the parent, if any: */
@@ -7391,7 +7392,7 @@ static inline void update_blocked_averages(int cpu)
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
 	rq_unlock_irqrestore(rq, &rf);
 }
-- 
2.15.0.rc2.357.g7e34df9404-goog