Hi Vincent,
On 03/05/2014 12:48 PM, Vincent Guittot wrote:
> Create a dedicated topology table for handling the asymmetric feature.
> The current proposal creates a new level which describes which groups
> of CPUs take advantage of SD_ASYM_PACKING. The useless level will be
> removed during the build of the sched_domain topology.
> Another solution would be to set SD_ASYM_PACKING in the sd_flags of the
> SMT level during the boot sequence and before the build of the
> sched_domain topology.
Is the below what you mean by the other solution? If so, I would
strongly recommend this approach rather than adding another level to
the topology table to represent the asymmetric behaviour.
+static struct sched_domain_topology_level powerpc_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+	{ cpu_smt_mask, SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES
+			| arch_sd_sibling_asym_packing(), SD_INIT_NAME(SMT) },
+#endif
+	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ NULL, },
+};
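One caveat with the snippet above: arch_sd_sibling_asym_packing() is a
function call, so it cannot sit in a static initializer as written. The
flag would instead have to be folded in at boot, before the scheduler
consumes the table. A rough sketch (assuming the flags member of struct
sched_domain_topology_level is an int named sd_flags in this version),
reusing the set_sched_topology() hook from your patch:

static void __init set_sched_topology(void)
{
#ifdef CONFIG_SCHED_SMT
	/*
	 * Fold SD_ASYM_PACKING into the SMT entry (index 0) before
	 * build_sched_domains() walks the table. Assumes the flags
	 * member is an int named sd_flags in this version of the
	 * struct.
	 */
	powerpc_topology[0].sd_flags |= arch_sd_sibling_asym_packing();
#endif
	sched_domain_topology = powerpc_topology;
}

This keeps the table flat while still setting the flag only when the
CPU_FTR_ASYM_SMT feature is present.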
Regards
Preeti U Murthy
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> ---
>  arch/powerpc/kernel/smp.c | 35 +++++++++++++++++++++++++++--------
>  kernel/sched/core.c       |  6 ------
>  2 files changed, 27 insertions(+), 14 deletions(-)
> diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
> index ac2621a..75da054 100644
> --- a/arch/powerpc/kernel/smp.c
> +++ b/arch/powerpc/kernel/smp.c
> @@ -755,6 +755,32 @@ int setup_profiling_timer(unsigned int multiplier)
>  	return 0;
>  }
> +#ifdef CONFIG_SCHED_SMT
> +/* cpumask of CPUs with asymmetric SMT dependency */
> +static const struct cpumask *cpu_asmt_mask(int cpu)
> +{
> +	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
> +		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
> +		return topology_thread_cpumask(cpu);
> +	}
> +	return cpumask_of(cpu);
> +}
> +#endif
> +
> +static struct sched_domain_topology_level powerpc_topology[] = {
> +#ifdef CONFIG_SCHED_SMT
> +	{ cpu_asmt_mask, SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING, SD_INIT_NAME(ASMT) },
> +	{ cpu_smt_mask, SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES, SD_INIT_NAME(SMT) },
> +#endif
> +	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
> +	{ NULL, },
> +};
> +
> +static void __init set_sched_topology(void)
> +{
> +	sched_domain_topology = powerpc_topology;
> +}
> +
>  void __init smp_cpus_done(unsigned int max_cpus)
>  {
>  	cpumask_var_t old_mask;
>
> @@ -779,15 +805,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
>
>  	dump_numa_cpu_topology();
>
> -}
> +	set_sched_topology();
>
> -int arch_sd_sibling_asym_packing(void)
> -{
> -	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
> -		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
> -		return SD_ASYM_PACKING;
> -	}
> -	return 0;
>  }
>  #ifdef CONFIG_HOTPLUG_CPU
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 3479467..7606de0 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -5818,11 +5818,6 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
>  	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
>  }
>
> -int __weak arch_sd_sibling_asym_packing(void)
> -{
> -	return 0*SD_ASYM_PACKING;
> -}
> -
>  /*
>   * Initializers for schedule domains
>   * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
> @@ -6000,7 +5995,6 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
>  	if (sd->flags & SD_SHARE_CPUPOWER) {
>  		sd->imbalance_pct = 110;
>  		sd->smt_gain = 1178;	/* ~15% */
> -		sd->flags |= arch_sd_sibling_asym_packing();
>
>  	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
>  		sd->imbalance_pct = 117;