We don't need to track the running timer for a cpu base anymore, but we still need to check whether a base is busy for sanity checking during CPU hotplug.
Let's replace 'running_timer' with 'busy' to handle that efficiently.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 kernel/time/timer.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 364644811485..2db05206594b 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -77,7 +77,7 @@ struct tvec_root {
 struct tvec_base {
 	spinlock_t lock;
-	struct timer_list *running_timer;
+	bool busy;
 	unsigned long timer_jiffies;
 	unsigned long next_timer;
 	unsigned long active_timers;
@@ -1180,6 +1180,8 @@ static inline void __run_timers(struct tvec_base *base)
 		spin_unlock_irq(&base->lock);
 		return;
 	}
+
+	base->busy = true;
 	while (time_after_eq(jiffies, base->timer_jiffies)) {
 		struct list_head work_list;
 		struct list_head *head = &work_list;
@@ -1236,7 +1238,6 @@ static inline void __run_timers(struct tvec_base *base)
 			timer_stats_account_timer(timer);
-			base->running_timer = timer;
 			timer_set_running(timer);
 			detach_expired_timer(timer, base);
@@ -1270,7 +1271,7 @@ static inline void __run_timers(struct tvec_base *base)
 			}
 		}
 	}
-	base->running_timer = NULL;
+	base->busy = false;
 	spin_unlock_irq(&base->lock);
 }
@@ -1675,7 +1676,7 @@ static void migrate_timers(int cpu)
 	spin_lock_irq(&new_base->lock);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-	BUG_ON(old_base->running_timer);
+	BUG_ON(old_base->busy);
 	for (i = 0; i < TVR_SIZE; i++)
 		migrate_timer_list(new_base, old_base->tv1.vec + i);
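
For readers outside the kernel tree, here is a minimal userspace sketch of the pattern the patch relies on: a per-base 'busy' flag that is set under the base lock for the duration of an expiry run, and asserted clear before a base's timers are migrated. The struct, function names and pthread/assert scaffolding below are illustrative stand-ins for the kernel's tvec_base, __run_timers(), migrate_timers() and BUG_ON(), not the kernel API itself.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's per-cpu timer base. */
struct timer_base {
	pthread_mutex_t lock;
	bool busy;		/* set while this base is expiring timers */
};

static void run_timers(struct timer_base *base)
{
	pthread_mutex_lock(&base->lock);
	base->busy = true;	/* mark the base busy for the whole expiry run */

	/* ... expire pending timers here ... */

	base->busy = false;	/* done: the base may be migrated again */
	pthread_mutex_unlock(&base->lock);
}

static void migrate_timers(struct timer_base *old_base, struct timer_base *new_base)
{
	pthread_mutex_lock(&new_base->lock);
	pthread_mutex_lock(&old_base->lock);

	/* Sanity check: never migrate a base that is mid-expiry. */
	assert(!old_base->busy);

	/* ... move timer lists from old_base to new_base here ... */

	pthread_mutex_unlock(&old_base->lock);
	pthread_mutex_unlock(&new_base->lock);
}

int main(void)
{
	struct timer_base a = { .lock = PTHREAD_MUTEX_INITIALIZER, .busy = false };
	struct timer_base b = { .lock = PTHREAD_MUTEX_INITIALIZER, .busy = false };

	run_timers(&a);
	migrate_timers(&a, &b);
	printf("migration ok, a.busy=%d\n", a.busy);
	return 0;
}

Because both the flag updates and the sanity check happen with the base lock held, a plain bool is sufficient here, just as in the patch; no extra atomics or memory barriers are needed for this particular use.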