From: Morten Rasmussen <morten.rasmussen@arm.com>
This patch adds load_avg_ratio to each task's sched_avg. load_avg_ratio is a variant of load_avg_contrib that is not scaled by the task's priority (load weight). It is calculated as:
runnable_avg_sum * NICE_0_LOAD / (runnable_avg_period + 1).
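A worked example with illustrative numbers (assuming the unscaled NICE_0_LOAD of 1024): a task that has been runnable for roughly half of the tracked period ends up at roughly half of NICE_0_LOAD, independent of its nice value:

runnable_avg_sum    = 23562
runnable_avg_period = 47123
load_avg_ratio      = 23562 * 1024 / (47123 + 1) = 512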
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
---
 include/linux/sched.h | 1 +
 kernel/sched/fair.c   | 3 +++
 2 files changed, 4 insertions(+)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4dc4990..81e4e82 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1151,6 +1151,7 @@ struct sched_avg {
 	u64 last_runnable_update;
 	s64 decay_count;
 	unsigned long load_avg_contrib;
+	unsigned long load_avg_ratio;
 	u32 usage_avg_sum;
 };
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 095d86c..3e17dd5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1192,6 +1192,9 @@ static inline void __update_task_entity_contrib(struct sched_entity *se)
 	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
 	contrib /= (se->avg.runnable_avg_period + 1);
 	se->avg.load_avg_contrib = scale_load(contrib);
+	contrib = se->avg.runnable_avg_sum * scale_load_down(NICE_0_LOAD);
+	contrib /= (se->avg.runnable_avg_period + 1);
+	se->avg.load_avg_ratio = scale_load(contrib);
 }

 /* Compute the current contribution to load_avg by se, return any delta */
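For illustration only, below is a minimal user-space sketch (not kernel code) of the two computations side by side. It assumes NICE_0_LOAD is 1024, takes the per-nice weights from the kernel's prio_to_weight[] table, and ignores scale_load()/scale_load_down(), which are no-ops when load resolution is not increased:

#include <stdio.h>

#define NICE_0_LOAD 1024UL

/* Nice levels and their load weights, taken from prio_to_weight[]. */
static const struct { int nice; unsigned long weight; } tasks[] = {
	{ -5, 3906 },
	{  0, 1024 },
	{  5,  335 },
};

int main(void)
{
	/* Roughly 50% runnable: sum is half of (period + 1). */
	unsigned long runnable_avg_sum = 23562;
	unsigned long runnable_avg_period = 47123;
	size_t i;

	for (i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++) {
		/* Scaled by the task's own weight, as load_avg_contrib is. */
		unsigned long contrib = runnable_avg_sum * tasks[i].weight /
					(runnable_avg_period + 1);
		/* Scaled by NICE_0_LOAD, as load_avg_ratio is. */
		unsigned long ratio = runnable_avg_sum * NICE_0_LOAD /
				      (runnable_avg_period + 1);

		printf("nice %3d: contrib = %4lu, ratio = %4lu\n",
		       tasks[i].nice, contrib, ratio);
	}
	return 0;
}

Compiled and run, this prints contrib = 1953, 512 and 167 for nice -5, 0 and +5, while ratio stays at 512 for all three: load_avg_contrib changes with priority, load_avg_ratio only with the fraction of time the task is runnable.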