From: Steve Muckle <smuckle@linaro.org>
An earlier patch provided a flag, rq->cpufreq_late_cb, which can be used to request a second callback into schedutil if the preemption does not occur on the target CPU.
Use this flag to implement support for remote callbacks in schedutil. If a remote callback is seen and rq->cpufreq_late_cb is not set, then it is an early callback and it is not yet known whether preemption will happen. Request a deferred callback by setting rq->cpufreq_late_cb.
If a remote callback is seen and rq->cpufreq_late_cb is set, then this is the deferred callback and preemption did not occur. Queue the irq work for this callback on the destination CPU. The irq work will carry out the fast or slow switch as appropriate.
A callback for a CPU which is not the current CPU but is a CPU in the same cpufreq policy as the current CPU is not a remote callback. These callbacks are treated the same as callbacks for the current CPU.
Signed-off-by: Steve Muckle <smuckle@linaro.org>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 kernel/sched/cpufreq_schedutil.c | 33 +++++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 2904f6ee7888..40c2c728602f 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -66,6 +66,18 @@ static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
/************************ Governor internals ***********************/
+static inline bool sugov_defer_remote(bool remote, int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (remote && !rq->cpufreq_late_cb) {
+		rq->cpufreq_late_cb = true;
+		return true;
+	} else {
+		return false;
+	}
+}
+
 static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 {
 	s64 delta_ns;
@@ -100,14 +112,14 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, int cpu,
 	trace_cpu_frequency(next_freq, cpu);
 }
-static void sugov_update_commit(struct sugov_policy *sg_policy, int cpu,
-				u64 time, unsigned int next_freq)
+static void sugov_update_commit(struct sugov_policy *sg_policy, bool remote,
+				int cpu, u64 time, unsigned int next_freq)
 {
 	struct cpufreq_policy *policy = sg_policy->policy;
sg_policy->last_freq_update_time = time;
-	if (policy->fast_switch_enabled) {
+	if (policy->fast_switch_enabled && !remote) {
 		if (sg_policy->next_freq == next_freq) {
 			trace_cpu_frequency(policy->cur, cpu);
 			return;
@@ -117,7 +129,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, int cpu,
 	} else if (sg_policy->next_freq != next_freq) {
 		sg_policy->next_freq = next_freq;
 		sg_policy->work_in_progress = true;
-		irq_work_queue(&sg_policy->irq_work);
+		irq_work_queue_on(&sg_policy->irq_work, cpu);
 	}
 }
@@ -206,9 +218,13 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	struct cpufreq_policy *policy = sg_policy->policy;
+	bool remote = smp_processor_id() != hook->cpu;
 	unsigned long util, max;
 	unsigned int next_f;
+	if (sugov_defer_remote(remote, hook->cpu))
+		return;
+
 	sugov_set_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
@@ -222,7 +238,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 		sugov_iowait_boost(sg_cpu, &util, &max);
 		next_f = get_next_freq(sg_cpu, util, max);
 	}
-	sugov_update_commit(sg_policy, hook->cpu, time, next_f);
+	sugov_update_commit(sg_policy, remote, hook->cpu, time, next_f);
 }
 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
@@ -282,9 +298,14 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 {
 	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+	struct cpufreq_policy *policy = sg_policy->policy;
+	bool remote = !cpumask_test_cpu(smp_processor_id(), policy->cpus);
 	unsigned long util, max;
 	unsigned int next_f;
+	if (sugov_defer_remote(remote, hook->cpu))
+		return;
+
 	sugov_get_util(&util, &max, hook->cpu);
 	raw_spin_lock(&sg_policy->update_lock);
@@ -298,7 +319,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	if (sugov_should_update_freq(sg_policy, time)) {
 		next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
-		sugov_update_commit(sg_policy, hook->cpu, time, next_f);
+		sugov_update_commit(sg_policy, remote, hook->cpu, time, next_f);
 	}
 	raw_spin_unlock(&sg_policy->update_lock);
-- 
2.7.1.410.g6faf27b