This patch plugs the sched-idle code into the idle loop. It is experimental code and consequently it disables the suspend-to-idle path, as that path is not yet handled by sched-idle.
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
---
 kernel/sched/idle-sched.c | 30 ++++++++++++++++++++++++++++++
 kernel/sched/idle.c | 11 +++++++++--
 kernel/sched/sched.h | 20 ++++++++++++++++++++
 3 files changed, 59 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/idle-sched.c b/kernel/sched/idle-sched.c index 45c6e4c..e078956 100644 --- a/kernel/sched/idle-sched.c +++ b/kernel/sched/idle-sched.c @@ -60,6 +60,36 @@ struct wakeup { */ static DEFINE_PER_CPU(struct wakeup, *wakesup[NR_IRQS]);
+/*
+ * Variable flag to switch from cpuidle to idle-sched
+ */
+static int __read_mostly __sched_idle_enabled;
+
+/*
+ * Enable the sched_idle subsystem
+ */
+void sched_idle_enable(void)
+{
+	__sched_idle_enabled = 1;
+}
+
+/*
+ * Disable the sched_idle subsystem
+ */
+void sched_idle_disable(void)
+{
+	__sched_idle_enabled = 0;
+}
+
+/*
+ * Accessor flag to check if this idle subsystem should be used
+ * instead of the old cpuidle framework.
+ */
+int sched_idle_enabled(void)
+{
+	return __sched_idle_enabled;
+}
+
 /**
  * stats_add - add a new value in the statistic structure
  *
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 4a2ef5a..c3f9938 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -247,8 +247,14 @@ static void cpu_idle_loop(void)
 		 */
 		if (cpu_idle_force_poll || tick_check_broadcast_expired())
 			cpu_idle_poll();
-		else
-			cpuidle_idle_call();
+		else if (sched_idle_enabled()) {
+			int latency = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
+			s64 duration = sched_idle_next_wakeup();
+
+			sched_idle(duration, latency);
+		} else {
+			cpuidle_idle_call();
+		}
 
 		arch_cpu_idle_exit();
 	}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6d2a119..4f80048 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -35,6 +35,27 @@ extern void update_cpu_load_active(struct rq *this_rq);
 static inline void update_cpu_load_active(struct rq *this_rq) { }
 #endif
 
+#ifdef CONFIG_CPU_IDLE_GOV_SCHED
+extern int sched_idle(s64 duration, unsigned int latency);
+extern s64 sched_idle_next_wakeup(void);
+extern int sched_idle_enabled(void);
+#else
+static inline int sched_idle(s64 duration, unsigned int latency)
+{
+	return 0;
+}
+
+static inline s64 sched_idle_next_wakeup(void)
+{
+	return 0;
+}
+
+static inline int sched_idle_enabled(void)
+{
+	return 0;
+}
+#endif
+
 /*
  * Helpers for converting nanosecond timing to jiffy resolution
  */
-- 
1.9.1