Since we no longer update cpu_load, rename the related functions accordingly: s/update_idle_cpu_load/update_idle_rt_avg/ s/update_cpu_load_nohz/update_rt_avg_nohz/ s/update_cpu_load_active/update_avg_load_active/
No functional change.
Signed-off-by: Alex Shi <alex.shi@linaro.org> --- Documentation/trace/ftrace.txt | 8 ++++---- include/linux/sched.h | 2 +- kernel/sched/core.c | 2 +- kernel/sched/fair.c | 2 +- kernel/sched/proc.c | 8 ++++---- kernel/sched/sched.h | 4 ++-- kernel/time/tick-sched.c | 2 +- 7 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt index bd36598..2fe46b5 100644 --- a/Documentation/trace/ftrace.txt +++ b/Documentation/trace/ftrace.txt @@ -1542,12 +1542,12 @@ Doing the same with chrt -r 5 and function-trace set. <idle>-0 3dN.1 12us : menu_hrtimer_cancel <-tick_nohz_idle_exit <idle>-0 3dN.1 12us : ktime_get <-tick_nohz_idle_exit <idle>-0 3dN.1 12us : tick_do_update_jiffies64 <-tick_nohz_idle_exit - <idle>-0 3dN.1 13us : update_cpu_load_nohz <-tick_nohz_idle_exit - <idle>-0 3dN.1 13us : _raw_spin_lock <-update_cpu_load_nohz + <idle>-0 3dN.1 13us : update_rt_avg_nohz <-tick_nohz_idle_exit + <idle>-0 3dN.1 13us : _raw_spin_lock <-update_rt_avg_nohz <idle>-0 3dN.1 13us : add_preempt_count <-_raw_spin_lock - <idle>-0 3dN.2 13us : __update_cpu_load <-update_cpu_load_nohz + <idle>-0 3dN.2 13us : __update_cpu_load <-update_rt_avg_nohz <idle>-0 3dN.2 14us : sched_avg_update <-__update_cpu_load - <idle>-0 3dN.2 14us : _raw_spin_unlock <-update_cpu_load_nohz + <idle>-0 3dN.2 14us : _raw_spin_unlock <-update_rt_avg_nohz <idle>-0 3dN.2 14us : sub_preempt_count <-_raw_spin_unlock <idle>-0 3dN.1 15us : calc_load_exit_idle <-tick_nohz_idle_exit <idle>-0 3dN.1 15us : touch_softlockup_watchdog <-tick_nohz_idle_exit diff --git a/include/linux/sched.h b/include/linux/sched.h index 6c416c8..f6afcb3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -174,7 +174,7 @@ extern unsigned long this_cpu_load(void);
extern void calc_global_load(unsigned long ticks); -extern void update_cpu_load_nohz(void); +extern void update_rt_avg_nohz(void);
extern unsigned long get_parent_ip(unsigned long addr);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 32602595..74dae0e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2431,7 +2431,7 @@ void scheduler_tick(void) raw_spin_lock(&rq->lock); update_rq_clock(rq); curr->sched_class->task_tick(rq, curr, 0); - update_cpu_load_active(rq); + update_avg_load_active(rq); raw_spin_unlock(&rq->lock);
perf_event_task_tick(); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6c37ee1..1b008ac 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6986,7 +6986,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
raw_spin_lock_irq(&rq->lock); update_rq_clock(rq); - update_idle_cpu_load(rq); + update_idle_rt_avg(rq); raw_spin_unlock_irq(&rq->lock);
rebalance_domains(rq, CPU_IDLE); diff --git a/kernel/sched/proc.c b/kernel/sched/proc.c index dd3c2d9..42b7706 100644 --- a/kernel/sched/proc.c +++ b/kernel/sched/proc.c @@ -423,7 +423,7 @@ static void calc_load_account_active(struct rq *this_rq) * Called from nohz_idle_balance() to update the load ratings before doing the * idle balance. */ -void update_idle_cpu_load(struct rq *this_rq) +void update_idle_rt_avg(struct rq *this_rq) { unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
@@ -440,17 +440,17 @@ void update_idle_cpu_load(struct rq *this_rq) /* * Called from tick_nohz_idle_exit() */ -void update_cpu_load_nohz(void) +void update_rt_avg_nohz(void) { struct rq *this_rq = this_rq(); - update_idle_cpu_load(this_rq); + update_idle_rt_avg(this_rq); } #endif /* CONFIG_NO_HZ */
/* * Called from scheduler_tick() */ -void update_cpu_load_active(struct rq *this_rq) +void update_avg_load_active(struct rq *this_rq) { this_rq->last_load_update_tick = jiffies; sched_avg_update(this_rq); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c623131..ab310c2 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -21,7 +21,7 @@ extern unsigned long calc_load_update; extern atomic_long_t calc_load_tasks;
extern long calc_load_fold_active(struct rq *this_rq); -extern void update_cpu_load_active(struct rq *this_rq); +extern void update_avg_load_active(struct rq *this_rq);
/* * Helpers for converting nanosecond timing to jiffy resolution @@ -1194,7 +1194,7 @@ extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
unsigned long to_ratio(u64 period, u64 runtime);
-extern void update_idle_cpu_load(struct rq *this_rq); +extern void update_idle_rt_avg(struct rq *this_rq);
extern void init_task_runnable_average(struct task_struct *p);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 9f8af69..b1a400a 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -866,7 +866,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) { /* Update jiffies first */ tick_do_update_jiffies64(now); - update_cpu_load_nohz(); + update_rt_avg_nohz();
calc_load_exit_idle(); touch_softlockup_watchdog();