We can't rely on Kconfig options to set the fast and slow CPU lists for HMP scheduling if we want a single kernel binary to support multiple devices with different CPU topologies, e.g. ARM's TC2, Fast Models, or even non-big.LITTLE devices.
This patch adds the function arch_get_fast_and_slow_cpus() to generate the lists at run-time by parsing the CPU nodes in device-tree; it assumes slow cores are A7s and everything else is fast. The function still supports the old Kconfig options as this is useful for testing the HMP scheduler on devices without big.LITTLE.
Signed-off-by: Jon Medhurst <tixy@linaro.org>
---
The reason I'm pushing this patch is that I want to use it for this month's Linaro vexpress release to help with the goal of having a single build which will work on all CoreTiles (and possibly Fast Models).
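For reference, this is the shape of device-tree cpu node the new code keys off (an illustrative fragment only, not copied from a real .dts; the reg values are made up):

	cpus {
		#address-cells = <1>;
		#size-cells = <0>;

		cpu@0 {
			device_type = "cpu";
			compatible = "arm,cortex-a15";
			reg = <0>;
		};

		cpu@100 {
			device_type = "cpu";
			compatible = "arm,cortex-a7";
			reg = <0x100>;
		};
	};

of_find_node_by_type() iterates the nodes with device_type = "cpu", and of_device_is_compatible() matches against "arm,cortex-a7", so the second node above would land in the slow mask and the first in the fast mask.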
 arch/arm/kernel/topology.c | 69 ++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/fair.c        | 18 ++++--------
 2 files changed, 75 insertions(+), 12 deletions(-)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index f59193c..61302df 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -287,6 +287,75 @@ void store_cpu_topology(unsigned int cpuid)
 		cpu_topology[cpuid].socket_id, mpidr);
 }
+
+#ifdef CONFIG_SCHED_HMP
+
+static const char * const little_cores[] = {
+	"arm,cortex-a7",
+	NULL,
+};
+
+static bool is_little_cpu(struct device_node *cn)
+{
+	const char * const *lc;
+	for (lc = little_cores; *lc; lc++)
+		if (of_device_is_compatible(cn, *lc))
+			return true;
+	return false;
+}
+
+void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
+					struct cpumask *slow)
+{
+	struct device_node *cn = NULL;
+	int cpu = 0;
+
+	cpumask_clear(fast);
+	cpumask_clear(slow);
+
+	/*
+	 * Use the config options if they are given. This helps testing
+	 * HMP scheduling on systems without a big.LITTLE architecture.
+	 */
+	if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
+		if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
+			WARN(1, "Failed to parse HMP fast cpu mask!\n");
+		if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
+			WARN(1, "Failed to parse HMP slow cpu mask!\n");
+		return;
+	}
+
+	/*
+	 * Else, parse device tree for little cores.
+	 */
+	while ((cn = of_find_node_by_type(cn, "cpu"))) {
+
+		if (cpu >= num_possible_cpus())
+			break;
+
+		if (is_little_cpu(cn))
+			cpumask_set_cpu(cpu, slow);
+		else
+			cpumask_set_cpu(cpu, fast);
+
+		cpu++;
+	}
+
+	if (!cpumask_empty(fast) && !cpumask_empty(slow))
+		return;
+
+	/*
+	 * We didn't find both big and little cores so let's call all cores
+	 * fast as this will keep the system running, with all cores being
+	 * treated equal.
+	 */
+	cpumask_setall(fast);
+	cpumask_clear(slow);
+}
+
+#endif /* CONFIG_SCHED_HMP */
+
+
 /*
  * init_cpu_topology is called at boot when only one cpu is running
  * which prevent simultaneous write access to cpu_topology array
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f705a87..e056361 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3084,25 +3084,19 @@ done:
 #ifdef CONFIG_SCHED_HMP
 /* Heterogenous multiprocessor (HMP) optimizations
- * We need to know which cpus that are fast and slow. Ideally, this
- * information would be provided by the platform in some way. For now it is
- * set in the kernel config. */
+ * We need to know which cpus are fast and slow. */
 static struct cpumask hmp_fast_cpu_mask;
 static struct cpumask hmp_slow_cpu_mask;
-/* Setup fast and slow cpumasks.
- * This should be setup based on device tree somehow. */
+extern void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
+					       struct cpumask *slow);
+
+/* Setup fast and slow cpumasks. */
 static int __init hmp_cpu_mask_setup(void)
 {
 	char buf[64];
-	cpumask_clear(&hmp_fast_cpu_mask);
-	cpumask_clear(&hmp_slow_cpu_mask);
-
-	if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, &hmp_fast_cpu_mask))
-		WARN(1, "Failed to parse HMP fast cpu mask!\n");
-	if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, &hmp_slow_cpu_mask))
-		WARN(1, "Failed to parse HMP slow cpu mask!\n");
+	arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
 	printk(KERN_DEBUG "Initializing HMP scheduler:\n");
 	cpulist_scnprintf(buf, 64, &hmp_fast_cpu_mask);
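
(Usage note, not part of the patch: the Kconfig fallback still takes cpulist strings, so a test configuration on a non-big.LITTLE board might look something like

	CONFIG_HMP_FAST_CPU_MASK="0-1"
	CONFIG_HMP_SLOW_CPU_MASK="2-3"

where the values are purely hypothetical; cpulist_parse() accepts the usual comma-separated list of CPU numbers and ranges.)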