Fixing linaro dev address
On 16 May 2013 22:18, Chris Redpath <chris.redpath@arm.com> wrote:
Previously, an offline CPU would always appear to have zero load, and this would distort the offload functionality used for balancing the big and little domains.
Maintain a mask of online CPUs in each domain and use this instead.
Change-Id: I639b564b2f40cb659af8ceb8bd37f84b8a1fe323
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
---
 arch/arm/kernel/topology.c |    6 ++++--
 include/linux/sched.h      |    1 +
 kernel/sched/fair.c        |   39 +++++++++++++++++++++++++++++++++++++--
 3 files changed, 42 insertions(+), 4 deletions(-)
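[Editorial aside, not part of the patch: the helper name and load numbers below are hypothetical.] The distortion is easy to see if a domain's average load is computed over every possible CPU: hotplugged-out CPUs contribute zero and make a busy cluster look idle, which is exactly what misleads the offload path. A standalone sketch:

	/* Standalone illustration, not kernel code: why averaging over
	 * all possible CPUs misrepresents a partially hotplugged domain. */
	#include <stdio.h>

	/* Average the load of the CPUs selected by 'mask' (1 = include). */
	static unsigned long avg_load(const unsigned long *load,
				      const int *mask, int ncpus)
	{
		unsigned long sum = 0;
		int cpu, n = 0;

		for (cpu = 0; cpu < ncpus; cpu++) {
			if (mask[cpu]) {
				sum += load[cpu];
				n++;
			}
		}
		return n ? sum / n : 0;
	}

	int main(void)
	{
		/* 4-CPU little cluster, CPUs 2-3 hotplugged out; an
		 * offline CPU reports zero load.  Load scale is 0..1023. */
		unsigned long load[4] = { 800, 800, 0, 0 };
		int possible[4] = { 1, 1, 1, 1 };	/* static domain mask */
		int online[4]   = { 1, 1, 0, 0 };	/* maintained online mask */

		/* prints 400: the cluster looks half idle */
		printf("avg over possible cpus: %lu\n", avg_load(load, possible, 4));
		/* prints 800: the online CPUs are actually busy */
		printf("avg over online cpus:   %lu\n", avg_load(load, online, 4));
		return 0;
	}

The patch avoids this by trimming domain->cpus to the online CPUs, so load comparisons only ever see CPUs that can actually run tasks.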
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index fa45fb4..93ad2cd 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -381,12 +381,14 @@ void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
 	if(!cpumask_empty(&hmp_slow_cpu_mask)) {
 		domain = (struct hmp_domain *)
 			kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
-		cpumask_copy(&domain->cpus, &hmp_slow_cpu_mask);
+		cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
+		cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
 		list_add(&domain->hmp_domains, hmp_domains_list);
 	}
 	domain = (struct hmp_domain *)
 		kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
-	cpumask_copy(&domain->cpus, &hmp_fast_cpu_mask);
+	cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
+	cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
 	list_add(&domain->hmp_domains, hmp_domains_list);
 }
 #endif /* CONFIG_SCHED_HMP */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ea439ab..0553d76 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -988,6 +988,7 @@ bool cpus_share_cache(int this_cpu, int that_cpu);
 #ifdef CONFIG_SCHED_HMP
 struct hmp_domain {
 	struct cpumask cpus;
+	struct cpumask possible_cpus;
 	struct list_head hmp_domains;
 };
 #endif /* CONFIG_SCHED_HMP */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0f9941e..3a53343 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3514,10 +3514,10 @@ static int __init hmp_cpu_mask_setup(void)
 	dc = 0;
 	list_for_each(pos, &hmp_domains) {
 		domain = list_entry(pos, struct hmp_domain, hmp_domains);
-		cpulist_scnprintf(buf, 64, &domain->cpus);
+		cpulist_scnprintf(buf, 64, &domain->possible_cpus);
 		pr_debug(" HMP domain %d: %s\n", dc, buf);
 
-		for_each_cpu_mask(cpu, domain->cpus) {
+		for_each_cpu_mask(cpu, domain->possible_cpus) {
 			per_cpu(hmp_cpu_domain, cpu) = domain;
 		}
 		dc++;
@@ -3526,6 +3526,35 @@ static int __init hmp_cpu_mask_setup(void)
 	return 1;
 }
 
+static struct hmp_domain *hmp_get_hmp_domain_for_cpu(int cpu)
+{
+	struct hmp_domain *domain;
+	struct list_head *pos;
+
+	list_for_each(pos, &hmp_domains) {
+		domain = list_entry(pos, struct hmp_domain, hmp_domains);
+		if(cpumask_test_cpu(cpu, &domain->possible_cpus))
+			return domain;
+	}
+	return NULL;
+}
+
+static void hmp_online_cpu(int cpu)
+{
+	struct hmp_domain *domain = hmp_get_hmp_domain_for_cpu(cpu);
+
+	if(domain)
+		cpumask_set_cpu(cpu, &domain->cpus);
+}
+
+static void hmp_offline_cpu(int cpu)
+{
+	struct hmp_domain *domain = hmp_get_hmp_domain_for_cpu(cpu);
+
+	if(domain)
+		cpumask_clear_cpu(cpu, &domain->cpus);
+}
+
 /*
  * Migration thresholds should be in the range [0..1023]
  * hmp_up_threshold: min. load required for migrating tasks to a faster cpu
@@ -6570,11 +6599,17 @@ void trigger_load_balance(struct rq *rq, int cpu)
 static void rq_online_fair(struct rq *rq)
 {
+#ifdef CONFIG_SCHED_HMP
+	hmp_online_cpu(rq->cpu);
+#endif
 	update_sysctl();
 }
 
 static void rq_offline_fair(struct rq *rq)
 {
+#ifdef CONFIG_SCHED_HMP
+	hmp_offline_cpu(rq->cpu);
+#endif
 	update_sysctl();
 
 	/* Ensure any throttled groups are reachable by pick_next_task */
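[Editorial aside, not part of the patch.] With the hotplug hooks above keeping domain->cpus in sync, a consumer of the domain simply iterates the online mask instead of possible_cpus. A minimal sketch of such a consumer (hmp_least_loaded_cpu() is hypothetical; it assumes the kernel's usual cpumask and runqueue helpers):

	/* Hypothetical consumer, not from this patch: pick the least-loaded
	 * currently-online CPU in an HMP domain.  Offline CPUs are excluded
	 * automatically because hmp_offline_cpu() clears them from
	 * domain->cpus, so their zero load can no longer win the comparison. */
	static int hmp_least_loaded_cpu(struct hmp_domain *domain)
	{
		unsigned long min_load = ULONG_MAX;
		int cpu, target = -1;

		for_each_cpu(cpu, &domain->cpus) {
			unsigned long load = cpu_rq(cpu)->load.weight;

			if (load < min_load) {
				min_load = load;
				target = cpu;
			}
		}
		return target;	/* -1 if every CPU in the domain is offline */
	}

Keeping possible_cpus separate means hmp_cpu_mask_setup() can still wire up per_cpu(hmp_cpu_domain) for every CPU that may ever come online, while runtime balancing only considers CPUs that are actually up.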
-- 
1.7.9.5