The arch_update_cpu_topology function is called by the scheduler before it builds its sched_domain hierarchy. Move the update of the cpu topology masks into this function instead of doing it in store_cpu_topology, which is executed only once per cpu.
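For context, the generic scheduler side of this hook looks roughly like the sketch below (quoted from memory of kernel/sched/core.c in this era, not part of this patch): a weak stub reports "topology unchanged", and partition_sched_domains() calls the hook before deciding whether the existing domains can be reused.

	/*
	 * Sketch of the generic weak stub that the ARM code below
	 * overrides. A non-zero return value tells the scheduler
	 * that the cpu topology changed, forcing a full rebuild of
	 * the sched_domain hierarchy with the updated masks.
	 */
	int __weak arch_update_cpu_topology(void)
	{
		return 0;
	}

Returning 1 from the ARM implementation below therefore makes the scheduler rebuild its domains from the freshly recomputed sibling masks on every call.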
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 arch/arm/kernel/topology.c | 87 +++++++++++++++++++++++++++++++++-----------
 1 files changed, 66 insertions(+), 21 deletions(-)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 8200dea..d89c66c 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -43,12 +43,56 @@
 
 struct cputopo_arm cpu_topology[NR_CPUS];
 
+/*
+ * default topology function
+ */
+
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	return &cpu_topology[cpu].core_sibling;
 }
 
 /*
+ * clear cpu topology masks
+ */
+static void clear_cpu_topology_mask(void)
+{
+	unsigned int cpuid;
+	for_each_possible_cpu(cpuid) {
+		struct cputopo_arm *cpuid_topo = &(cpu_topology[cpuid]);
+		cpumask_clear(&cpuid_topo->core_sibling);
+		cpumask_clear(&cpuid_topo->thread_sibling);
+	}
+	smp_wmb();
+}
+
+static void default_cpu_topology_mask(unsigned int cpuid)
+{
+	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
+
+		if (cpuid_topo->socket_id == cpu_topo->socket_id) {
+			cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+			if (cpu != cpuid)
+				cpumask_set_cpu(cpu,
+					&cpuid_topo->core_sibling);
+
+			if (cpuid_topo->core_id == cpu_topo->core_id) {
+				cpumask_set_cpu(cpuid,
+					&cpu_topo->thread_sibling);
+				if (cpu != cpuid)
+					cpumask_set_cpu(cpu,
+						&cpuid_topo->thread_sibling);
+			}
+		}
+	}
+	smp_wmb();
+}
+
+/*
  * store_cpu_topology is called at boot when only one cpu is running
  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
  * which prevents simultaneous write access to cpu_topology array
@@ -57,7 +101,6 @@ void store_cpu_topology(unsigned int cpuid)
 {
 	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
 	unsigned int mpidr;
-	unsigned int cpu;
 
 	/* If the cpu topology has been already set, just return */
 	if (cpuid_topo->core_id != -1)
@@ -99,26 +142,10 @@ void store_cpu_topology(unsigned int cpuid)
 		cpuid_topo->socket_id = -1;
 	}
 
-	/* update core and thread sibling masks */
-	for_each_possible_cpu(cpu) {
-		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
-
-		if (cpuid_topo->socket_id == cpu_topo->socket_id) {
-			cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
-			if (cpu != cpuid)
-				cpumask_set_cpu(cpu,
-					&cpuid_topo->core_sibling);
-
-			if (cpuid_topo->core_id == cpu_topo->core_id) {
-				cpumask_set_cpu(cpuid,
-					&cpu_topo->thread_sibling);
-				if (cpu != cpuid)
-					cpumask_set_cpu(cpu,
-						&cpuid_topo->thread_sibling);
-			}
-		}
-	}
-	smp_wmb();
+	/*
+	 * The core and thread sibling masks will be set during the call of
+	 * arch_update_cpu_topology
+	 */
 
 	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
 		cpuid, cpu_topology[cpuid].thread_id,
@@ -127,6 +154,24 @@ void store_cpu_topology(unsigned int cpuid)
 }
 
 /*
+ * arch_update_cpu_topology is called by the scheduler before building
+ * a new sched_domain hierarchy.
+ */
+int arch_update_cpu_topology(void)
+{
+	unsigned int cpuid;
+	/* clear core mask */
+	clear_cpu_topology_mask();
+
+	/* update core and thread sibling masks */
+	for_each_possible_cpu(cpuid) {
+		default_cpu_topology_mask(cpuid);
+	}
+
+	return 1;
+}
+
+/*
  * init_cpu_topology is called at boot when only one cpu is running
  * which prevent simultaneous write access to cpu_topology array
  */
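As a quick sanity check of the pairing rule that default_cpu_topology_mask() implements, here is a minimal user-space rendition for a hypothetical 4-cpu system: two sockets of two single-thread cores, with plain bitmasks standing in for cpumask_t. The layout and all names (struct topo, update_masks) are invented for illustration and are not part of the patch.

	#include <stdio.h>

	#define NR_CPUS 4

	struct topo {
		int core_id, socket_id;
		unsigned int core_sibling, thread_sibling;	/* one bit per cpu */
	};

	/* Invented layout: cpus 0-1 on socket 0, cpus 2-3 on socket 1 */
	static struct topo topology[NR_CPUS] = {
		{ 0, 0 }, { 1, 0 }, { 0, 1 }, { 1, 1 },
	};

	/* Same pairing rule as default_cpu_topology_mask() above */
	static void update_masks(int cpuid)
	{
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (topology[cpuid].socket_id != topology[cpu].socket_id)
				continue;
			/* same socket: cpus are core siblings of each other */
			topology[cpu].core_sibling |= 1u << cpuid;
			topology[cpuid].core_sibling |= 1u << cpu;
			/* same core id as well: also thread siblings */
			if (topology[cpuid].core_id == topology[cpu].core_id) {
				topology[cpu].thread_sibling |= 1u << cpuid;
				topology[cpuid].thread_sibling |= 1u << cpu;
			}
		}
	}

	int main(void)
	{
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			update_masks(cpu);
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu%d: core_sibling=0x%x thread_sibling=0x%x\n",
			       cpu, topology[cpu].core_sibling,
			       topology[cpu].thread_sibling);
		return 0;
	}

Compiled and run, this prints core_sibling=0x3 for cpus 0-1 and 0xc for cpus 2-3, with each thread_sibling mask containing only the cpu itself, which is the result the in-kernel version produces each time the scheduler invokes arch_update_cpu_topology().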