From: Steve Muckle <smuckle@linaro.org>
Upcoming support for remote callbacks from the scheduler into schedutil requires that the CPU identified in the hook structure be used, rather than smp_processor_id(), to determine which CPU is being operated on.
Signed-off-by: Steve Muckle <smuckle@linaro.org>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 kernel/sched/cpufreq_schedutil.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
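Note for reviewers: this patch assumes the hook structure records the CPU it was registered for. As a rough sketch only (the cpu member is not added by this patch and its exact name/placement comes from the earlier patch in this series), the assumed layout is something like:

	/* Sketch only: the cpu member is assumed to be introduced by an
	 * earlier patch in this series, not by this one. */
	struct update_util_data {
		void (*func)(struct update_util_data *data, u64 time,
			     unsigned int flags);
		int cpu;	/* CPU this hook is registered for */
	};

The expectation is that cpufreq_add_update_util_hook() records the target CPU in that field at registration time, so the callbacks below can pass hook->cpu instead of calling smp_processor_id().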
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3cf7b8cb4ffe..2904f6ee7888 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -100,8 +100,8 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, int cpu,
 	trace_cpu_frequency(next_freq, cpu);
 }
 
-static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
-				unsigned int next_freq)
+static void sugov_update_commit(struct sugov_policy *sg_policy, int cpu,
+				u64 time, unsigned int next_freq)
 {
 	struct cpufreq_policy *policy = sg_policy->policy;
 
@@ -109,11 +109,11 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 
 	if (policy->fast_switch_enabled) {
 		if (sg_policy->next_freq == next_freq) {
-			trace_cpu_frequency(policy->cur, smp_processor_id());
+			trace_cpu_frequency(policy->cur, cpu);
 			return;
 		}
 		sg_policy->next_freq = next_freq;
-		sugov_fast_switch(sg_policy, smp_processor_id(), next_freq);
+		sugov_fast_switch(sg_policy, cpu, next_freq);
 	} else if (sg_policy->next_freq != next_freq) {
 		sg_policy->next_freq = next_freq;
 		sg_policy->work_in_progress = true;
@@ -159,12 +159,12 @@ static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
 
-static void sugov_get_util(unsigned long *util, unsigned long *max)
+static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
 {
-	struct rq *rq = this_rq();
+	struct rq *rq = cpu_rq(cpu);
 	unsigned long cfs_max;
 
-	cfs_max = arch_scale_cpu_capacity(NULL, smp_processor_id());
+	cfs_max = arch_scale_cpu_capacity(NULL, cpu);
 
 	*util = min(rq->cfs.avg.util_avg, cfs_max);
 	*max = cfs_max;
@@ -218,11 +218,11 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (flags & SCHED_CPUFREQ_RT_DL) {
 		next_f = policy->cpuinfo.max_freq;
 	} else {
-		sugov_get_util(&util, &max);
+		sugov_get_util(&util, &max, hook->cpu);
 		sugov_iowait_boost(sg_cpu, &util, &max);
 		next_f = get_next_freq(sg_cpu, util, max);
 	}
-	sugov_update_commit(sg_policy, time, next_f);
+	sugov_update_commit(sg_policy, hook->cpu, time, next_f);
 }
 
 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
@@ -245,10 +245,10 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
 		unsigned long j_util, j_max;
 		s64 delta_ns;
 
-		if (j == smp_processor_id())
+		j_sg_cpu = &per_cpu(sugov_cpu, j);
+		if (j_sg_cpu == sg_cpu)
 			continue;
 
-		j_sg_cpu = &per_cpu(sugov_cpu, j);
 		/*
 		 * If the CPU utilization was last updated before the previous
 		 * frequency update and the time elapsed between the last update
@@ -285,7 +285,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	unsigned long util, max;
 	unsigned int next_f;
 
-	sugov_get_util(&util, &max);
+	sugov_get_util(&util, &max, hook->cpu);
 
 	raw_spin_lock(&sg_policy->update_lock);
 
@@ -298,7 +298,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 
 	if (sugov_should_update_freq(sg_policy, time)) {
 		next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
-		sugov_update_commit(sg_policy, time, next_f);
+		sugov_update_commit(sg_policy, hook->cpu, time, next_f);
 	}
 
 	raw_spin_unlock(&sg_policy->update_lock);
-- 
2.7.1.410.g6faf27b