In several places we iterate over all possible clock-bases of a particular cpu-base, even though we only need to visit the active ones.

Each cpu-base already has an 'active_bases' field, a bitmask that is updated as hrtimers are added to and removed from its clock-bases.

This patch adds for_each_active_base(), which uses 'active_bases' to iterate over the active clock-bases only.

It also converts the existing code that iterates over all clock-bases to use the new helper.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 kernel/time/hrtimer.c | 65 ++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 41 insertions(+), 24 deletions(-)
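As an illustration only (not part of the patch), here is a minimal userspace sketch of the bitmask walk that __next_bit()/for_each_active_base() below implement; the names next_bit/MAX_BASES and the example mask are made up for this sketch:

#include <stdio.h>

#define MAX_BASES 8			/* stand-in for HRTIMER_MAX_CLOCK_BASES */

/* Return the first set bit at or above 'bit'; mirrors __next_bit(). */
static int next_bit(unsigned int mask, int bit)
{
	do {
		if (mask & (1U << bit))
			return bit;
	} while (++bit < MAX_BASES);

	return 0;			/* callers never pass an empty mask */
}

int main(void)
{
	unsigned int active = 0x0b;	/* bits 0, 1 and 3 set: three active bases */
	int bit;

	/* Visit only the set bits, clearing each one after it is handled. */
	for (bit = -1; active && (bit = next_bit(active, ++bit), 1);
	     active &= ~(1U << bit))
		printf("visiting base %d\n", bit);

	return 0;
}

This prints "visiting base 0", "1" and "3", skipping the inactive bases, which is exactly what the macro does with cpu_base->active_bases.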
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 3152f327c988..9da63e9ee63b 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -110,6 +110,31 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
 }
 
+static inline int __next_bit(unsigned int active_bases, int bit)
+{
+	do {
+		if (active_bases & (1 << bit))
+			return bit;
+	} while (++bit < HRTIMER_MAX_CLOCK_BASES);
+
+	/* We should never reach here */
+	return 0;
+}
+
+/*
+ * for_each_active_base: iterate over all active clock bases
+ * @_bit: 'int' variable for internal purpose
+ * @_base: holds pointer to an active clock base
+ * @_cpu_base: cpu base to iterate on
+ * @_active_bases: 'unsigned int' variable for internal purpose
+ */
+#define for_each_active_base(_bit, _base, _cpu_base, _active_bases)	\
+	for ((_active_bases) = (_cpu_base)->active_bases, (_bit) = -1;	\
+	     (_active_bases) &&						\
+	     ((_bit) = __next_bit(_active_bases, ++_bit),		\
+	      (_base) = (_cpu_base)->clock_base + _bit);		\
+	     (_active_bases) &= ~(1 << (_bit)))
+
 /*
  * Get the coarse grained time at the softirq based on xtime and
  * wall_to_monotonic.
 */
@@ -443,19 +468,15 @@ static inline void debug_deactivate(struct hrtimer *timer)
 #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
 static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 {
-	struct hrtimer_clock_base *base = cpu_base->clock_base;
+	struct hrtimer_clock_base *base;
 	ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
+	struct hrtimer *timer;
+	unsigned int active_bases;
 	int i;
 
-	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-		struct timerqueue_node *next;
-		struct hrtimer *timer;
-
-		next = timerqueue_getnext(&base->active);
-		if (!next)
-			continue;
-
-		timer = container_of(next, struct hrtimer, node);
+	for_each_active_base(i, base, cpu_base, active_bases) {
+		timer = container_of(timerqueue_getnext(&base->active),
+				     struct hrtimer, node);
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 		if (expires.tv64 < expires_next.tv64)
 			expires_next = expires;
@@ -1245,6 +1266,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	ktime_t expires_next, now, entry_time, delta;
+	struct hrtimer_clock_base *base;
+	unsigned int active_bases;
 	int i, retries = 0;
 
 	BUG_ON(!cpu_base->hres_active);
@@ -1264,15 +1287,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 */
 	cpu_base->expires_next.tv64 = KTIME_MAX;
 
-	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		struct hrtimer_clock_base *base;
+	for_each_active_base(i, base, cpu_base, active_bases) {
 		struct timerqueue_node *node;
 		ktime_t basenow;
 
-		if (!(cpu_base->active_bases & (1 << i)))
-			continue;
-
-		base = cpu_base->clock_base + i;
 		basenow = ktime_add(now, base->offset);
 
 		while ((node = timerqueue_getnext(&base->active))) {
@@ -1435,16 +1453,13 @@ void hrtimer_run_queues(void)
 	struct timerqueue_node *node;
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	struct hrtimer_clock_base *base;
+	unsigned int active_bases;
 	int index, gettime = 1;
 
 	if (hrtimer_hres_active())
 		return;
 
-	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
-		base = &cpu_base->clock_base[index];
-		if (!timerqueue_getnext(&base->active))
-			continue;
-
+	for_each_active_base(index, base, cpu_base, active_bases) {
 		if (gettime) {
 			hrtimer_get_softirq_time(cpu_base);
 			gettime = 0;
@@ -1665,6 +1680,8 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 static void migrate_hrtimers(int scpu)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
+	struct hrtimer_clock_base *clock_base;
+	unsigned int active_bases;
 	int i;
 
 	BUG_ON(cpu_online(scpu));
@@ -1680,9 +1697,9 @@ static void migrate_hrtimers(int scpu)
 	raw_spin_lock(&new_base->lock);
 	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
-	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		migrate_hrtimer_list(&old_base->clock_base[i],
-				     &new_base->clock_base[i]);
+	for_each_active_base(i, clock_base, old_base, active_bases) {
+		migrate_hrtimer_list(clock_base,
+				     &new_base->clock_base[clock_base->index]);
 	}
 
 	raw_spin_unlock(&old_base->lock);
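
For completeness, the caller-side pattern used in the hunks above looks like this (sketch only; variable names follow the patch and the loop body is a placeholder):

	struct hrtimer_clock_base *base;
	unsigned int active_bases;
	int i;

	for_each_active_base(i, base, cpu_base, active_bases) {
		/* 'base' points to an active clock base here */
	}

The caller has to provide the 'i' and 'active_bases' scratch variables because the macro modifies both while walking the bitmask.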