diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5bed94e..4759676 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -805,17 +805,6 @@ config SCHED_MC making when dealing with multi-core CPU chips at a cost of slightly increased overhead in some places. If unsure say N here. -config IRQ_TIME_ACCOUNTING - bool "Fine granularity task level IRQ time accounting" - default n - ---help--- - Select this option to enable fine granularity task irq time - accounting. This is done by reading a timestamp on each - transitions between softirq and hardirq state, so there can be a - small performance impact. - - If in doubt, say N here. - source "kernel/Kconfig.preempt" config X86_UP_APIC diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index bb7f309..3d08f8d 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -137,6 +137,7 @@ static inline void account_system_vtime(struct task_struct *tsk) } #else extern void account_system_vtime(struct task_struct *tsk); +extern u64 irq_time_read(int cpu); #endif #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index df00cb0..b033d78 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -762,7 +762,7 @@ static DEFINE_PER_CPU(u64, cpu_hardirq_time); static DEFINE_PER_CPU(u64, cpu_softirq_time); static DEFINE_PER_CPU(u64, irq_start_time); -static int sched_clock_irqtime; +static int sched_clock_irqtime = 1; void enable_sched_clock_irqtime(void) { @@ -789,7 +789,7 @@ static inline void irq_time_write_end(void) __this_cpu_inc(irq_time_seq.sequence); } -static inline u64 irq_time_read(int cpu) +u64 __sched irq_time_read(int cpu) { u64 irq_time; unsigned seq; @@ -811,12 +811,14 @@ static inline void irq_time_write_end(void) { } -static inline u64 irq_time_read(int cpu) +u64 __sched irq_time_read(int cpu) { return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); } #endif /* CONFIG_64BIT */ 
+EXPORT_SYMBOL(irq_time_read); + /* * Called before incrementing preempt_count on {soft,}irq_enter * and before decrementing preempt_count on {soft,}irq_exit. diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8745ac7..d6d7afc 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -299,6 +299,17 @@ config SCHEDSTATS application, you can say N to avoid the very slight overhead this adds. +config IRQ_TIME_ACCOUNTING + bool "Fine granularity task level IRQ time accounting" + default n + ---help--- + Select this option to enable fine granularity task irq time + accounting. This is done by reading a timestamp on each + transition between softirq and hardirq state, so there can be a + small performance impact. + + If in doubt, say N here. + config TIMER_STATS bool "Collect kernel timers statistics" depends on DEBUG_KERNEL && PROC_FS