From: Steve Muckle <smuckle@linaro.org>
In order to trigger a frequency change on the target CPU of a remote wakeup, a client of the scheduler cpufreq hook may need to send an IPI to that target CPU.
This IPI is redundant if the wakeup causes preemption because the scheduler (and the scheduler cpufreq hook) will run soon on the target CPU in that case anyway.
Add a late cpufreq callback to cover the non-preemption case. It can be requested by the scheduler cpufreq hook client by setting rq->cpufreq_late_cb. The callback is only invoked if preemption does not occur.
Signed-off-by: Steve Muckle <smuckle@linaro.org>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 kernel/sched/core.c  |  4 ++++
 kernel/sched/sched.h | 21 +++++++++++++++++++++
 2 files changed, 25 insertions(+)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c56fb57f2991..5e99fdae2590 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -485,6 +485,8 @@ void resched_curr(struct rq *rq)
 
 	lockdep_assert_held(&rq->lock);
 
+	cpufreq_clear_late_cb(rq);
+
 	if (test_tsk_need_resched(curr))
 		return;
 
@@ -970,6 +972,8 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
 		rq_clock_skip_update(rq, true);
+
+	cpufreq_late_update(rq);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7b34c7826ca5..108d159f9a35 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -727,6 +727,10 @@ struct rq {
 	/* Must be inspected within a rcu lock section */
 	struct cpuidle_state *idle_state;
 #endif
+
+#if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ)
+	unsigned char cpufreq_late_cb;
+#endif
 };
 
 static inline int cpu_of(struct rq *rq)
@@ -1817,6 +1821,23 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
 #endif /* CONFIG_CPU_FREQ */
 
+#if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ)
+static inline void cpufreq_late_update(struct rq *rq)
+{
+	if (rq->cpufreq_late_cb) {
+		cpufreq_update_util(rq, 0);
+		rq->cpufreq_late_cb = false;
+	}
+}
+
+static inline void cpufreq_clear_late_cb(struct rq *rq)
+{
+	rq->cpufreq_late_cb = false;
+}
+#else
+static inline void cpufreq_late_update(struct rq *rq) {}
+static inline void cpufreq_clear_late_cb(struct rq *rq) {}
+#endif /* CONFIG_SMP && CONFIG_CPU_FREQ */
+
 #ifdef arch_scale_freq_capacity
 #ifndef arch_scale_freq_invariant
 #define arch_scale_freq_invariant() (true)
-- 
2.7.1.410.g6faf27b