The arch_update_cpu_topology function is called by the scheduler before it builds its sched_domain hierarchy. Prepare for updating the cpu topology masks in this function, in addition to setting them in store_cpu_topology, which is executed only once per cpu.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 arch/arm/kernel/topology.c |  108 +++++++++++++++++++++++++++++++++++---------
 1 files changed, 87 insertions(+), 21 deletions(-)
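(Context note, below the '---' cut line so git am drops it: the scheduler consumes this hook through a weak default that returns 0, and partition_sched_domains() treats a non-zero return as "the topology changed, rebuild the sched domains from scratch". An abridged sketch of the generic side, from the 3.x-era kernel/sched.c, for orientation only:

int __weak arch_update_cpu_topology(void)
{
        return 0;       /* generic default: the cpu topology never changes */
}

void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                             struct sched_domain_attr *dattr_new)
{
        int new_topology;

        /* Let the architecture update cpu core mappings */
        new_topology = arch_update_cpu_topology();
        /* ... domains are then rebuilt when new_topology is non-zero ... */
}

So returning 1 below is what makes the scheduler pick up the rewritten sibling masks.)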
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 8200dea..90352cb 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -43,12 +43,76 @@
struct cputopo_arm cpu_topology[NR_CPUS];
 
+/*
+ * cpu topology mask management
+ */
+
+unsigned int advanced_topology = 0;
+
+/*
+ * default topology function
+ */
+
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
        return &cpu_topology[cpu].core_sibling;
 }
 
 /*
+ * clear cpu topology masks
+ */
+static void clear_cpu_topology_mask(void)
+{
+       unsigned int cpuid;
+       for_each_possible_cpu(cpuid) {
+               struct cputopo_arm *cpuid_topo = &(cpu_topology[cpuid]);
+               cpumask_clear(&cpuid_topo->core_sibling);
+               cpumask_clear(&cpuid_topo->thread_sibling);
+       }
+       smp_wmb();
+}
+
+/*
+ * default_cpu_topology_mask sets the core and thread masks as described in
+ * the ARM ARM
+ */
+static inline void default_cpu_topology_mask(unsigned int cpuid)
+{
+       struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+       unsigned int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
+
+               if (cpuid_topo->socket_id == cpu_topo->socket_id) {
+                       cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+                       if (cpu != cpuid)
+                               cpumask_set_cpu(cpu,
+                                       &cpuid_topo->core_sibling);
+
+                       if (cpuid_topo->core_id == cpu_topo->core_id) {
+                               cpumask_set_cpu(cpuid,
+                                       &cpu_topo->thread_sibling);
+                               if (cpu != cpuid)
+                                       cpumask_set_cpu(cpu,
+                                               &cpuid_topo->thread_sibling);
+                       }
+               }
+       }
+       smp_wmb();
+}
+
+static void normal_cpu_topology_mask(void)
+{
+       unsigned int cpuid;
+
+       for_each_possible_cpu(cpuid) {
+               default_cpu_topology_mask(cpuid);
+       }
+       smp_wmb();
+}
+
+/*
  * store_cpu_topology is called at boot when only one cpu is running
  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
  * which prevents simultaneous write access to cpu_topology array
  */
@@ -57,7 +121,6 @@ void store_cpu_topology(unsigned int cpuid)
 {
        struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
        unsigned int mpidr;
-       unsigned int cpu;
 
        /* If the cpu topology has been already set, just return */
        if (cpuid_topo->core_id != -1)
@@ -99,26 +162,11 @@ void store_cpu_topology(unsigned int cpuid)
                cpuid_topo->socket_id = -1;
        }
 
-       /* update core and thread sibling masks */
-       for_each_possible_cpu(cpu) {
-               struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
-
-               if (cpuid_topo->socket_id == cpu_topo->socket_id) {
-                       cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
-                       if (cpu != cpuid)
-                               cpumask_set_cpu(cpu,
-                                       &cpuid_topo->core_sibling);
-
-                       if (cpuid_topo->core_id == cpu_topo->core_id) {
-                               cpumask_set_cpu(cpuid,
-                                       &cpu_topo->thread_sibling);
-                               if (cpu != cpuid)
-                                       cpumask_set_cpu(cpu,
-                                               &cpuid_topo->thread_sibling);
-                       }
-               }
-       }
-       smp_wmb();
+       /*
+        * The core and thread sibling masks can also be updated during a
+        * call to arch_update_cpu_topology
+        */
+       default_cpu_topology_mask(cpuid);
 
        printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
                cpuid, cpu_topology[cpuid].thread_id,
@@ -127,6 +175,24 @@ void store_cpu_topology(unsigned int cpuid)
 }
 
 /*
+ * arch_update_cpu_topology is called by the scheduler before building
+ * a new sched_domain hierarchy.
+ */
+int arch_update_cpu_topology(void)
+{
+       if (!advanced_topology)
+               return 0;
+
+       /* clear core mask */
+       clear_cpu_topology_mask();
+
+       /* update core and thread sibling masks */
+       normal_cpu_topology_mask();
+
+       return 1;
+}
+
+/*
  * init_cpu_topology is called at boot when only one cpu is running
  * which prevent simultaneous write access to cpu_topology array
  */
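(Illustration only, not part of the patch: nothing here sets advanced_topology, so arch_update_cpu_topology() keeps returning 0 and behaviour is unchanged until platform code, or presumably a later patch in this series, flips the flag. A purely hypothetical platform hook, where my_platform_has_custom_topology() is an invented helper:

#include <linux/init.h>

extern unsigned int advanced_topology;
extern int my_platform_has_custom_topology(void); /* invented for the example */

static int __init my_platform_topology_init(void)
{
        /* tell arch_update_cpu_topology() to start rewriting the masks */
        if (my_platform_has_custom_topology())
                advanced_topology = 1;

        return 0;
}
core_initcall(my_platform_topology_init);

The next sched_domain rebuild after the flag is set then picks up the updated sibling masks.)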