Add new trace events which help during load-balance and/or task-migration investigations.
Signed-off-by: Lukasz Luba <l.luba@partner.samsung.com> --- include/trace/events/sched.h | 130 +++++++++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 79d3af7e627e..36aef01a8c00 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -777,6 +777,136 @@ TRACE_EVENT(sched_util_est_cpu, __entry->util_avg, __entry->util_est_runnable) ); + +TRACE_EVENT(sched_active_lb_stop_cpu, + + TP_PROTO(int busiest_cpu, int target_cpu), + + TP_ARGS(busiest_cpu, target_cpu), + + TP_STRUCT__entry( + __field( int, busiest_cpu ) + __field( int, target_cpu ) + ), + + TP_fast_assign( + __entry->busiest_cpu = busiest_cpu; + __entry->target_cpu = target_cpu; + ), + + TP_printk("busiest_cpu=%d target_cpu=%d", + __entry->busiest_cpu, + __entry->target_cpu) +); + +TRACE_EVENT(sched_can_migrate_task, + + TP_PROTO(int pid, int migrate, int task_hot, int busiest_cpu, int target_cpu), + + TP_ARGS(pid, migrate, task_hot, busiest_cpu, target_cpu), + + TP_STRUCT__entry( + __field( int, pid ) + __field( int, migrate ) + __field( int, task_hot ) + __field( int, busiest_cpu ) + __field( int, target_cpu ) + ), + + TP_fast_assign( + __entry->pid = pid; + __entry->migrate = migrate; + __entry->task_hot = task_hot; + __entry->busiest_cpu = busiest_cpu; + __entry->target_cpu = target_cpu; + ), + + TP_printk("pid=%d migrate=%d task_hot=%d src_cpu=%d dst_cpu=%d", + __entry->pid, + __entry->migrate, + __entry->task_hot, + __entry->busiest_cpu, + __entry->target_cpu) +); + +TRACE_EVENT(sched_migrate_capacity_comparison, + + TP_PROTO(int src_cpu, int dst_cpu, int src_cpu_cap, int dst_cpu_cap, + int src_cpu_util, int dst_cpu_util, int needed), + + TP_ARGS(src_cpu, dst_cpu, src_cpu_cap, dst_cpu_cap, src_cpu_util, + dst_cpu_util, needed), + + TP_STRUCT__entry( + __field( int, src_cpu ) + __field( int, dst_cpu ) + __field( int, src_cpu_cap ) + __field( int, dst_cpu_cap ) + __field( int, src_cpu_util ) + __field( int, dst_cpu_util ) + __field( int, needed ) + ), + + TP_fast_assign( + __entry->src_cpu = src_cpu; + __entry->dst_cpu = dst_cpu; + __entry->src_cpu_cap = 
src_cpu_cap; + __entry->dst_cpu_cap = dst_cpu_cap; + __entry->src_cpu_util = src_cpu_util; + __entry->dst_cpu_util = dst_cpu_util; + __entry->needed = needed; + ), + + TP_printk("src_cpu=%d dst_cpu=%d src_cpu_cap=%d dst_cpu_cap=%d src_cpu_util=%d dst_cpu_util=%d needed=%d", + __entry->src_cpu, + __entry->dst_cpu, + __entry->src_cpu_cap, + __entry->dst_cpu_cap, + __entry->src_cpu_util, + __entry->dst_cpu_util, + __entry->needed) +); + +TRACE_EVENT(sched_need_active_balance, + + TP_PROTO(int needed), + + TP_ARGS(needed), + + TP_STRUCT__entry( + __field( int, needed ) + ), + + TP_fast_assign( + __entry->needed = needed; + ), + + TP_printk("needed=%d", __entry->needed) +); + +TRACE_EVENT(sched_find_idlest_cpu, + + TP_PROTO(const struct cpumask *group_cpus, int cpu, int new_cpu), + + TP_ARGS(group_cpus, cpu, new_cpu), + + TP_STRUCT__entry( + __bitmask(cpumask, num_possible_cpus()) + __field( int, cpu ) + __field( int, new_cpu ) + ), + + TP_fast_assign( + __assign_bitmask(cpumask, cpumask_bits(group_cpus), + num_possible_cpus()); + __entry->cpu = cpu; + __entry->new_cpu = new_cpu; + ), + + TP_printk("group_cpus=%s cpu=%d new_cpu=%d", + __get_bitmask(cpumask), __entry->cpu, __entry->new_cpu) +); + #endif /* CONFIG_SMP */ #endif /* _TRACE_SCHED_H */