Use accessor functions to verify the state of clockevent devices in core code.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 kernel/time/clockevents.c    | 24 ++++++++++++------------
 kernel/time/tick-broadcast.c |  6 +++---
 kernel/time/tick-common.c    |  2 +-
 kernel/time/tick-oneshot.c   |  2 +-
 4 files changed, 17 insertions(+), 17 deletions(-)
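For reference, the accessors this patch switches to are the static inline helpers declared in include/linux/clockchips.h. A minimal sketch of their expected shape (not part of this patch, and assuming the dev->state field as it exists at this point in the series) is:

/*
 * Sketch of the state accessors used below; each helper simply compares
 * dev->state against the matching CLOCK_EVT_STATE_* value. The actual
 * definitions live in include/linux/clockchips.h next to
 * struct clock_event_device and enum clock_event_state.
 */
static inline bool clockevent_state_detached(struct clock_event_device *dev)
{
	return dev->state == CLOCK_EVT_STATE_DETACHED;
}

static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
{
	return dev->state == CLOCK_EVT_STATE_SHUTDOWN;
}

static inline bool clockevent_state_periodic(struct clock_event_device *dev)
{
	return dev->state == CLOCK_EVT_STATE_PERIODIC;
}

static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
{
	return dev->state == CLOCK_EVT_STATE_ONESHOT;
}

static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
{
	return dev->state == CLOCK_EVT_STATE_ONESHOT_STOPPED;
}

The behaviour is unchanged; wrapping the comparisons keeps direct accesses to the state field out of the core code, so how the state is stored can later be changed in one place.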
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 4922f1b805ea..f533d3b13b92 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -136,7 +136,7 @@ static int __clockevents_set_state(struct clock_event_device *dev,
 	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
 		/* Core internal bug */
-		if (WARN_ONCE(dev->state != CLOCK_EVT_STATE_ONESHOT,
+		if (WARN_ONCE(!clockevent_state_oneshot(dev),
			      "Current state: %d\n", dev->state))
 			return -EINVAL;

@@ -170,7 +170,7 @@ void clockevents_set_state(struct clock_event_device *dev,
 		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
 		 * on it, so fix it up and emit a warning:
 		 */
-		if (state == CLOCK_EVT_STATE_ONESHOT) {
+		if (clockevent_state_oneshot(dev)) {
 			if (unlikely(!dev->mult)) {
 				dev->mult = 1;
 				WARN_ON(1);
@@ -259,7 +259,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
 		delta = dev->min_delta_ns;
 		dev->next_event = ktime_add_ns(ktime_get(), delta);

-		if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+		if (clockevent_state_shutdown(dev))
 			return 0;

 		dev->retries++;
@@ -296,7 +296,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
 	delta = dev->min_delta_ns;
 	dev->next_event = ktime_add_ns(ktime_get(), delta);

-	if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+	if (clockevent_state_shutdown(dev))
 		return 0;

 	dev->retries++;
@@ -328,11 +328,11 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
 	dev->next_event = expires;

-	if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+	if (clockevent_state_shutdown(dev))
 		return 0;

 	/* We must be in ONESHOT state here */
-	WARN_ONCE(dev->state != CLOCK_EVT_STATE_ONESHOT, "Current state: %d\n",
+	WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
 		  dev->state);

 	/* Shortcut for clockevent devices that can deal with ktime. */
@@ -377,7 +377,7 @@ static int clockevents_replace(struct clock_event_device *ced)
 	struct clock_event_device *dev, *newdev = NULL;

 	list_for_each_entry(dev, &clockevent_devices, list) {
-		if (dev == ced || dev->state != CLOCK_EVT_STATE_DETACHED)
+		if (dev == ced || !clockevent_state_detached(dev))
 			continue;

 		if (!tick_check_replacement(newdev, dev))
@@ -403,7 +403,7 @@ static int clockevents_replace(struct clock_event_device *ced)
 static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
 {
 	/* Fast track. Device is unused */
-	if (ced->state == CLOCK_EVT_STATE_DETACHED) {
+	if (clockevent_state_detached(ced)) {
 		list_del_init(&ced->list);
 		return 0;
 	}
@@ -561,10 +561,10 @@ int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
 {
 	clockevents_config(dev, freq);
-	if (dev->state == CLOCK_EVT_STATE_ONESHOT)
+	if (clockevent_state_oneshot(dev))
 		return clockevents_program_event(dev, dev->next_event, false);

-	if (dev->state == CLOCK_EVT_STATE_PERIODIC)
+	if (clockevent_state_periodic(dev))
 		return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);

 	return 0;
@@ -625,7 +625,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
 	}

 	if (new) {
-		BUG_ON(new->state != CLOCK_EVT_STATE_DETACHED);
+		BUG_ON(!clockevent_state_detached(new));
 		clockevents_shutdown(new);
 	}
 }
@@ -681,7 +681,7 @@ void tick_cleanup_dead_cpu(int cpu)
 		if (cpumask_test_cpu(cpu, dev->cpumask) &&
 		    cpumask_weight(dev->cpumask) == 1 &&
 		    !tick_is_broadcast_device(dev)) {
-			BUG_ON(dev->state != CLOCK_EVT_STATE_DETACHED);
+			BUG_ON(!clockevent_state_detached(dev));
 			list_del(&dev->list);
 		}
 	}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 12fcc55d607a..132f819fdcdf 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -303,7 +303,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 	raw_spin_lock(&tick_broadcast_lock);
 	bc_local = tick_do_periodic_broadcast();
-	if (dev->state == CLOCK_EVT_STATE_ONESHOT) {
+	if (clockevent_state_oneshot(dev)) {
 		ktime_t next = ktime_add(dev->next_event, tick_period);

 		clockevents_program_event(dev, next, true);
@@ -528,7 +528,7 @@ static void tick_broadcast_set_affinity(struct clock_event_device *bc,
 static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
 				     ktime_t expires)
 {
-	if (bc->state != CLOCK_EVT_STATE_ONESHOT)
+	if (!clockevent_state_oneshot(bc))
 		clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);

 	clockevents_program_event(bc, expires, 1);
@@ -831,7 +831,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 	/* Set it up only once ! */
 	if (bc->event_handler != tick_handle_oneshot_broadcast) {
-		int was_periodic = bc->state == CLOCK_EVT_STATE_PERIODIC;
+		int was_periodic = clockevent_state_periodic(bc);

 		bc->event_handler = tick_handle_oneshot_broadcast;
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index ea5f9eae8f74..cf881c62c3c5 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -112,7 +112,7 @@ void tick_handle_periodic(struct clock_event_device *dev)
 		return;
 #endif

-	if (dev->state != CLOCK_EVT_STATE_ONESHOT)
+	if (!clockevent_state_oneshot(dev))
 		return;
 	for (;;) {
 		/*
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index f8de75715c2f..3f9715bec291 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -36,7 +36,7 @@ int tick_program_event(ktime_t expires, int force)
 		return 0;
 	}

-	if (unlikely(dev->state == CLOCK_EVT_STATE_ONESHOT_STOPPED)) {
+	if (unlikely(clockevent_state_oneshot_stopped(dev))) {
 		/*
 		 * We need the clock event again, configure it in ONESHOT mode
 		 * before using it.