Many IRQs are quiet most of the time, or they tend to come in bursts with fairly regular intervals within each burst. It is therefore possible to detect the IRQs with stable intervals and estimate when the next IRQ event is most likely to happen.
Examples of such IRQs include audio-related IRQs, where the FIFO size and/or DMA descriptor size combined with the sample rate create stable intervals, and block devices during large data transfers. Even network streaming of multimedia content creates patterns of periodic network interface IRQs in some cases.
This patch adds code to track, for each IRQ, the mean and variance of the intervals between IRQ events over a window of recent samples. Those statistics can be used to assist cpuidle in selecting the most appropriate sleep state by predicting the most likely time of the next interrupt.
Because the stats are gathered in interrupt context, the core computation is kept as lightweight as possible.
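To make the statistical model concrete, here is a minimal, self-contained userspace sketch of the idea (it is only an illustration and does not use the kernel stats helpers this patch relies on): keep a small window of the most recent inter-IRQ intervals, derive a mean and standard deviation from it, and consider the source predictable as long as new intervals fall within [mean - stddev, mean + stddev].

/* Illustrative userspace sketch only -- not the kernel implementation. */
#include <stdio.h>
#include <math.h>

#define WINDOW 8        /* same sample count as STATS_MAX_VALUE below */

struct irq_stats {
        unsigned int values[WINDOW];    /* last inter-IRQ intervals, in us */
        unsigned int count;             /* valid samples, up to WINDOW */
        unsigned int next;              /* circular write index */
};

static double mean(const struct irq_stats *s)
{
        double sum = 0.0;
        unsigned int i;

        for (i = 0; i < s->count; i++)
                sum += s->values[i];

        return s->count ? sum / s->count : 0.0;
}

static double stddev(const struct irq_stats *s)
{
        double m = mean(s), sum = 0.0;
        unsigned int i;

        for (i = 0; i < s->count; i++)
                sum += (s->values[i] - m) * (s->values[i] - m);

        return s->count ? sqrt(sum / s->count) : 0.0;
}

int main(void)
{
        /* hypothetical audio-like source: ~1000us period with small jitter */
        unsigned int intervals[] = { 998, 1003, 1001, 997, 1002, 999, 1000, 1004 };
        struct irq_stats s = { { 0 }, 0, 0 };
        unsigned int i;

        for (i = 0; i < sizeof(intervals) / sizeof(intervals[0]); i++) {
                double m = mean(&s), sd = stddev(&s);
                unsigned int diff = intervals[i];

                /* predictable while new samples stay inside [m - sd, m + sd] */
                if (s.count && diff > m - sd && diff < m + sd)
                        printf("predictable, next IRQ expected in ~%.0f us\n", m);

                /* add the new sample to the window */
                s.values[s.next] = diff;
                s.next = (s.next + 1) % WINDOW;
                if (s.count < WINDOW)
                        s.count++;
        }

        return 0;
}

With a regular ~1000us source, the sketch starts reporting a prediction as soon as a couple of samples are in the window; the kernel code below does the equivalent bookkeeping with integer-only operations, since it runs in interrupt context.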
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
---
 drivers/cpuidle/Kconfig   |   5 +
 kernel/irq/Kconfig        |   1 -
 kernel/sched/Makefile     |   1 +
 kernel/sched/idle-sched.c | 409 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 415 insertions(+), 1 deletion(-)
 create mode 100644 kernel/sched/idle-sched.c
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 8c7930b..dd17215 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -25,6 +25,11 @@ config CPU_IDLE_GOV_MENU
 	bool "Menu governor (for tickless system)"
 	default y
 
+config CPU_IDLE_GOV_SCHED
+	bool "Sched idle governor"
+	select IRQ_TIMINGS
+	default y
+
 config DT_IDLE_STATES
 	bool
 
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 1275fd1..81557ae 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -75,7 +75,6 @@ config HANDLE_DOMAIN_IRQ
 
 config IRQ_TIMINGS
 	bool
-	default y
 
 config IRQ_DOMAIN_DEBUG
 	bool "Expose hardware/virtual IRQ mapping via debugfs"
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 6768797..f7d5a35 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -19,3 +19,4 @@ obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
+obj-$(CONFIG_CPU_IDLE_GOV_SCHED) += idle-sched.o
diff --git a/kernel/sched/idle-sched.c b/kernel/sched/idle-sched.c
new file mode 100644
index 0000000..bfbd07f
--- /dev/null
+++ b/kernel/sched/idle-sched.c
@@ -0,0 +1,409 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd, Daniel Lezcano <daniel.lezcano@linaro.org>
+ *                                Nicolas Pitre <nicolas.pitre@linaro.org>
+ *
+ */
+#include <linux/cpuidle.h>
+#include <linux/interrupt.h>
+#include <linux/irqdesc.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/stats.h>
+#include <linux/tick.h>
+#include <linux/time64.h>
+
+/*
+ * Define the number of samples we will be dealing with to monitor the
+ * interrupts.
+ */
+#define STATS_MAX_VALUE 8
+
+struct wakeup {
+	struct stats *stats;
+	ktime_t timestamp;
+	int predictable;
+	int good;
+	int bad;
+};
+
+/*
+ * Per cpu and irq statistics. Each cpu receives interrupts and those
+ * ones can be distributed following an irq chip specific
+ * algorithm. Random irq distribution is the worst case for predicting
+ * interrupt behavior, but usually that does not happen or it can be
+ * fixed from userspace by setting the irq affinity.
+ */
+static DEFINE_PER_CPU(struct wakeup, *wakesup[NR_IRQS]);
+
+/**
+ * sched_idle_irq - irq timestamp callback
+ *
+ * @irq: the irq number
+ * @timestamp: when the interrupt occurred
+ * @dev_id: device id for shared interrupt (not yet used)
+ * @data: private data given when registering this callback
+ *
+ * Interrupt callback called when an interrupt happens. This function
+ * is critical as it is called in interrupt context: as few operations
+ * as possible are done here.
+ */
+static void sched_idle_irq(unsigned int irq, ktime_t timestamp,
+			   void *dev_id, void *data)
+{
+	s32 diff;
+	u32 stddev, mean;
+	unsigned int cpu = raw_smp_processor_id();
+	struct wakeup *w = per_cpu(wakesup[irq], cpu);
+
+	if (!w)
+		return;
+
+	/*
+	 * This is the first time the interrupt occurs; we can't do any
+	 * stats yet, just store the timestamp and exit.
+	 */
+	if (!ktime_to_us(w->timestamp)) {
+		w->timestamp = timestamp;
+		return;
+	}
+
+	/*
+	 * Microsecond resolution is enough for our purpose.
+	 */
+	diff = ktime_us_delta(timestamp, w->timestamp);
+	w->timestamp = timestamp;
+
+	/*
+	 * If ~one second elapsed since the last interrupt, just drop
+	 * the statistics and mark the wakeup source as non
+	 * predictable. We have to wait for the next interval to
+	 * continue filling the stats.
+	 */
+	if (diff > (1 << 20)) {
+		stats_reset(w->stats);
+		w->predictable = 0;
+		return;
+	}
+
+	mean = stats_mean(w->stats);
+	stddev = stats_stddev(w->stats);
+	stats_add(w->stats, diff);
+
+	/*
+	 * We check the value is between mean - stddev and mean + stddev.
+	 * As long as the value is in this interval, we are in a repeating
+	 * pattern and the wakeup is considered predictable.
+	 */
+	if (diff > (mean - stddev) && diff < (mean + stddev)) {
+		w->predictable = 1;
+		w->good++;
+	} else {
+		w->bad++;
+	}
+}
+
+/*
+ * Callback to be called when an interrupt happens.
+ */
+static struct irqtimings irq_timings = {
+	.handler = sched_idle_irq,
+};
+
+static ktime_t next_irq_event(void)
+{
+	unsigned int irq, cpu = raw_smp_processor_id();
+	ktime_t diff, next, min = (ktime_t){ .tv64 = KTIME_MAX };
+	struct wakeup *w;
+
+	/*
+	 * Look up the interrupt array for this cpu and search for the
+	 * earliest expected interrupt.
+	 */
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		w = per_cpu(wakesup[irq], cpu);
+
+		/*
+		 * The interrupt was not set up as a wakeup source, or
+		 * the wakeup source is not considered stable enough at
+		 * this moment to do a prediction.
+		 */
+		if (!w || !w->predictable)
+			continue;
+
+		/*
+		 * Let's compute the next irq event: the wakeup source
+		 * is considered predictable, so add the average
+		 * interval to the timestamp of the latest interrupt.
+		 */
+		next = ktime_add_us(w->timestamp, stats_mean(w->stats));
+
+		/*
+		 * If the interrupt is supposed to happen before the
+		 * current minimum, then it becomes the minimum itself.
+		 */
+		if (ktime_before(next, min))
+			min = next;
+	}
+
+	/*
+	 * At this point, we have our prediction but the caller is
+	 * expecting the remaining time before the next event, so
+	 * compute the diff.
+	 */
+	diff = ktime_sub(min, ktime_get());
+
+	/*
+	 * The result could be negative for different reasons:
+	 * - the prediction is incorrect
+	 * - the prediction was too near now and expired while we were
+	 *   in this function
+	 *
+	 * In both cases, we return KTIME_MAX as a failure to do a
+	 * prediction.
+	 */
+	if (ktime_compare(diff, ktime_set(0, 0)) <= 0)
+		return (ktime_t){ .tv64 = KTIME_MAX };
+
+	return diff;
+}
+
+/**
+ * sched_idle_next_wakeup - Predict the next wakeup on the current cpu
+ *
+ * The next event on the cpu is based on a statistical approach to the
+ * interrupt events and on the deterministic timer value. From the
+ * timer and the irqs, we return the one expected to occur first.
+ *
+ * Returns the expected remaining idle time before being woken up by
+ * an interrupt.
+ */
+s64 sched_idle_next_wakeup(void)
+{
+	s64 next_timer = ktime_to_us(tick_nohz_get_sleep_length());
+	s64 next_irq = ktime_to_us(next_irq_event());
+
+	return min(next_irq, next_timer);
+}
+
+/**
+ * sched_idle - go to idle for a specified amount of time
+ *
+ * @duration: the idle duration time
+ * @latency: the latency constraint
+ *
+ * Returns 0 on success, < 0 otherwise.
+ */
+int sched_idle(s64 duration, unsigned int latency)
+{
+	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+	struct cpuidle_state_usage *su;
+	struct cpuidle_state *s;
+	int i, ret = 0, index = -1;
+
+	rcu_idle_enter();
+
+	/*
+	 * No cpuidle driver is available, let's use the default arch
+	 * idle function.
+	 */
+	if (cpuidle_not_available(drv, dev))
+		goto default_idle;
+
+	/*
+	 * Find the idle state with the lowest power while satisfying
+	 * our constraints. We will save energy if the duration of the
+	 * idle time is bigger than the target residency, which is the
+	 * break even point. The choice is modulated by the latency
+	 * constraint.
+	 */
+	for (i = 0; i < drv->state_count; i++) {
+
+		s = &drv->states[i];
+
+		su = &dev->states_usage[i];
+
+		if (s->disabled || su->disable)
+			continue;
+		if (s->target_residency > duration)
+			continue;
+		if (s->exit_latency > latency)
+			continue;
+
+		index = i;
+	}
+
+	/*
+	 * The idle task must be rescheduled, it is pointless to go to
+	 * idle, just re-enable the interrupts and return.
+	 */
+	if (current_clr_polling_and_test()) {
+		local_irq_enable();
+		goto out;
+	}
+
+	if (index < 0) {
+		/*
+		 * No idle state fulfilled the constraints, jump to the
+		 * default function as if there wasn't any cpuidle
+		 * driver.
+		 */
+		goto default_idle;
+	} else {
+		/*
+		 * Enter the idle state previously returned by the
+		 * governor decision. This function will block until an
+		 * interrupt occurs and will take care of re-enabling
+		 * the local interrupts. Go through the common exit
+		 * path so rcu_idle_exit() is called.
+		 */
+		ret = cpuidle_enter(drv, dev, index);
+		goto out;
+	}
+
+default_idle:
+	default_idle_call();
+out:
+	rcu_idle_exit();
+	return ret;
+}
+
+/**
+ * sched_irq_timing_free - free_irq callback
+ *
+ * @irq: the irq number to stop tracking
+ * @dev_id: not used at the moment
+ *
+ * This function removes the irq from the wakeup source prediction table.
+ */
+static void sched_irq_timing_free(unsigned int irq, void *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct wakeup *w;
+	unsigned int cpu;
+	unsigned long flags;
+
+	for_each_possible_cpu(cpu) {
+		w = per_cpu(wakesup[irq], cpu);
+
+		if (!w)
+			continue;
+
+		if (w->stats)
+			stats_free(w->stats);
+
+		kfree(w);
+
+		per_cpu(wakesup[irq], cpu) = NULL;
+	}
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	desc->timings = NULL;
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/**
+ * sched_irq_timing_setup - setup_irq callback
+ *
+ * @irq: the interrupt number to be tracked
+ * @act: the new irq action to be set for this interrupt
+ *
+ * Update the irq table to be tracked in order to predict the next event.
+ *
+ * Returns zero on success. On error it returns -ENOMEM.
+ */
+static int sched_irq_timing_setup(unsigned int irq, struct irqaction *act)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct wakeup *w;
+	unsigned int cpu;
+	unsigned long flags;
+	int ret = -ENOMEM;
+
+	/*
+	 * No interrupt is set for this descriptor, or the interrupt is
+	 * related to a timer. Timers are deterministic, so there is no
+	 * need to try to do any prediction on them. Neither case is an
+	 * error, we are just not interested.
+	 */
+	if (!act || (act->flags & __IRQF_TIMER))
+		return 0;
+
+	/*
+	 * Allocate the wakeup structure and the stats structure. As
+	 * the interrupt can occur on any cpu, allocate the wakeup
+	 * structure on a per-cpu basis.
+	 */
+	for_each_possible_cpu(cpu) {
+
+		w = kzalloc(sizeof(*w), GFP_KERNEL);
+		if (!w)
+			goto undo;
+
+		w->stats = stats_alloc(STATS_MAX_VALUE);
+		if (!w->stats) {
+			kfree(w);
+			goto undo;
+		}
+
+		per_cpu(wakesup[irq], cpu) = w;
+	}
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	desc->timings = &irq_timings;
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	ret = 0;
+undo:
+	/*
+	 * Roll back all allocations on failure.
+	 */
+	if (ret)
+		sched_irq_timing_free(irq, act->dev_id);
+
+	return ret;
+}
+
+/*
+ * Setup/free irq callbacks
+ */
+static struct irqtimings_ops irqt_ops = {
+	.setup = sched_irq_timing_setup,
+	.free = sched_irq_timing_free,
+};
+
+/**
+ * sched_idle_init - setup the interrupt tracking table
+ *
+ * At init time, some interrupts may already have been set up, and
+ * more devices can be set up during the system's lifetime. In order
+ * to track all the interrupts we are interested in, we first register
+ * a couple of callbacks to keep the interrupt tracking table up to
+ * date, and then we populate the table with the interrupts that were
+ * already set up.
+ */
+int __init sched_idle_init(void)
+{
+	struct irq_desc *desc;
+	unsigned int irq;
+	int ret;
+
+	/*
+	 * Register the setup/free irq callbacks, so newly setup or
+	 * freed interrupts will update their tracking.
+	 */
+	ret = register_irq_timings(&irqt_ops);
+	if (ret) {
+		pr_err("Failed to register timings ops\n");
+		return ret;
+	}
+
+	/*
+	 * For all the irqs already set up, assign the timing callback.
+	 * Interrupts with a NULL descriptor are discarded.
+	 */
+	for_each_irq_desc(irq, desc)
+		sched_irq_timing_setup(irq, desc->action);
+
+	return 0;
+}
+late_initcall(sched_idle_init);
-- 
1.9.1