From: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
---
 include/trace/events/irq.h | 44 ++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/idle-sched.c  |  4 ++++
 2 files changed, 48 insertions(+)
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index ff8f6c0..fd25da5 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h @@ -99,6 +99,50 @@ TRACE_EVENT(irq_handler_exit, __entry->irq, __entry->ret ? "handled" : "unhandled") );
+#ifdef CONFIG_IRQ_TIMINGS
+/**
+ * irq_timings - provide updated IRQ timing statistics
+ * @irq: irq number
+ * @cpu: cpu number the irq statistics are accounted to
+ * @interval: time interval since last irq
+ * @mean: mean interval
+ * @stddev: time interval stddev
+ * @good: current count of predictable irqs
+ * @bad: current count of unpredictable irqs
+ */
+TRACE_EVENT(irq_timings,
+
+	TP_PROTO(int irq, int cpu, s32 interval, s32 mean,
+		 u32 stddev, u32 good, u32 bad),
+
+	TP_ARGS(irq, cpu, interval, mean, stddev, good, bad),
+
+	TP_STRUCT__entry(
+		__field( unsigned int, irq )
+		__field( unsigned int, cpu )
+		__field( s32, interval )
+		__field( s32, mean )
+		__field( u32, stddev )
+		__field( u32, good )
+		__field( u32, bad )
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+		__entry->cpu = cpu;
+		__entry->interval = interval;
+		__entry->mean = mean;
+		__entry->stddev = stddev;
+		__entry->good = good;
+		__entry->bad = bad;
+	),
+
+	TP_printk("irq=%u/cpu=%u intv=%d mean=%d stddev=%u (%u vs %u)",
+		  __entry->irq, __entry->cpu, __entry->interval, __entry->mean,
+		  __entry->stddev, __entry->good, __entry->bad)
+);
+#endif
+
 DECLARE_EVENT_CLASS(softirq,
TP_PROTO(unsigned int vec_nr), diff --git a/kernel/sched/idle-sched.c b/kernel/sched/idle-sched.c index bfbd07f..f38ba67 100644 --- a/kernel/sched/idle-sched.c +++ b/kernel/sched/idle-sched.c @@ -12,6 +12,8 @@ #include <linux/tick.h> #include <linux/time64.h>
+#include <trace/events/irq.h> + /* * Define the number of values we will dealing with to the monitor the * interruptions. @@ -101,6 +103,8 @@ static void sched_idle_irq(unsigned int irq, ktime_t timestamp, } else { w->bad++; } + + trace_irq_timings(irq, cpu, diff, mean, stddev, w->good, w->bad); }
/* -- 1.9.1