From: Chris Redpath <chris.redpath@arm.com>
If we migrate a sleeping task away from a CPU which has the tick stopped, then both clock_task and decay_counter will be out of date for that CPU, and we will not decay the load correctly regardless of how often we update the blocked load.
This is only an issue for tasks which are not on a runqueue (because otherwise that CPU would be awake) and whose previous CPU has had the tick stopped.
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
---
 kernel/sched/fair.c | 44 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 43 insertions(+), 1 deletion(-)
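Not part of the patch: a minimal userspace sketch of why a stale clock under-decays blocked load. Per-entity load tracking halves a blocked task's contribution roughly every 32ms of rq time (clock_task), so decay over whole periods amounts to a right shift. decay_sketch() and the numbers below are hypothetical, for illustration only.

#include <stdio.h>

#define LOAD_AVG_PERIOD 32 /* ms per halving of blocked load */

/* Decay load by one halving per elapsed 32ms period (whole periods only). */
static unsigned long long decay_sketch(unsigned long long load,
                                       unsigned long long elapsed_ms)
{
        unsigned long long periods = elapsed_ms / LOAD_AVG_PERIOD;

        return periods >= 64 ? 0 : load >> periods;
}

int main(void)
{
        /* The task slept 96ms before being migrated: three halvings. */
        printf("up-to-date clock: %llu\n", decay_sketch(1024, 96));
        /*
         * The source CPU's tick was stopped, so clock_task only ever
         * advanced 5ms: the contribution is not decayed at all.
         */
        printf("stale clock:      %llu\n", decay_sketch(1024, 5));
        return 0;
}

Updating the remote rq's clock and blocked load under rq->lock, as the hunks below do, brings the elapsed time up to date before __synchronize_entity_decay() computes the decay.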
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 01cc427..86e82c8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4379,6 +4379,7 @@ unlock:
  * load-balance).
  */
 #ifdef CONFIG_FAIR_GROUP_SCHED
+static int nohz_test_cpu(int cpu);
 /*
  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
  * cfs_rq_of(p) references at time of call are still valid and identify the
@@ -4390,7 +4391,6 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
 {
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
        /*
         * Load tracking: accumulate removed load so that it can be processed
         * when we next update owning cfs_rq under rq->lock. Tasks contribute
@@ -4398,6 +4398,25 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
         * be negative here since on-rq tasks have decay-count == 0.
         */
        if (se->avg.decay_count) {
+               /*
+                * If we migrate a sleeping task away from a CPU
+                * which has the tick stopped, then both the clock_task
+                * and decay_counter will be out of date for that CPU
+                * and we will not decay load correctly.
+                */
+               if (!se->on_rq && nohz_test_cpu(task_cpu(p))) {
+                       struct rq *rq = cpu_rq(task_cpu(p));
+                       unsigned long flags;
+                       /*
+                        * Current CPU cannot be holding rq->lock in this
+                        * circumstance, but another might be. We must hold
+                        * rq->lock before we go poking around in its clocks
+                        */
+                       raw_spin_lock_irqsave(&rq->lock, flags);
+                       update_rq_clock(rq);
+                       update_cfs_rq_blocked_load(cfs_rq, 0);
+                       raw_spin_unlock_irqrestore(&rq->lock, flags);
+               }
                se->avg.decay_count = -__synchronize_entity_decay(se);
                atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
        }
@@ -6329,6 +6348,17 @@ static struct {
        atomic_t nr_cpus;
        unsigned long next_balance;     /* in jiffy units */
 } nohz ____cacheline_aligned;
+/*
+ * nohz_test_cpu used when load tracking is enabled. FAIR_GROUP_SCHED
+ * dependency below may be removed when load tracking guards are
+ * removed.
+ */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static int nohz_test_cpu(int cpu)
+{
+       return cpumask_test_cpu(cpu, nohz.idle_cpus_mask);
+}
+#endif

 #ifdef CONFIG_SCHED_HMP_LITTLE_PACKING
 /*
@@ -6481,6 +6511,18 @@ static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
                return NOTIFY_DONE;
        }
 }
+#else
+/*
+ * nohz_test_cpu used when load tracking is enabled. FAIR_GROUP_SCHED
+ * dependency below may be removed when load tracking guards are
+ * removed.
+ */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static int nohz_test_cpu(int cpu)
+{
+       return 0;
+}
+#endif
 #endif

 static DEFINE_SPINLOCK(balancing);