On 05/03/14 07:18, Vincent Guittot wrote:
Create a dedicated topology table for ARM which will create a new level to differentiate CPUs that can or cannot power gate independently from others.
The patch gives an example of how to add domain that will take advantage of SD_SHARE_POWERDOMAIN.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
arch/arm/kernel/topology.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 0bc94b1..ae8ffbc 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -185,6 +185,15 @@ const struct cpumask *cpu_coregroup_mask(int cpu) return &cpu_topology[cpu].core_sibling; } +/*
+ * The current assumption is that we can power gate each core independently.
+ * This will be superseded by DT binding once available.
+ */
+const struct cpumask *cpu_corepower_mask(int cpu)
+{
+	return &cpu_topology[cpu].thread_sibling;
+}
Although you already explained this to me in a private conversation, it's important to notice that running this set-up on a dual cluster TC2 (2 Cortex-A15 + 3 Cortex-A7) (no independent core power gating) we don't see the SD_SHARE_POWERDOMAIN topology flag set in the sd's on MC level because this patch-set doesn't contain the appropriate DT parsing. Like you said back then, the comment above mentions this.
 static void update_siblings_masks(unsigned int cpuid)
 {
 	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -266,6 +275,20 @@ void store_cpu_topology(unsigned int cpuid)
 		cpu_topology[cpuid].socket_id, mpidr);
 }
 
+static struct sched_domain_topology_level arm_topology[] = {
+#ifdef CONFIG_SCHED_MC
+	{ cpu_corepower_mask, SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN, SD_INIT_NAME(GMC) },
+	{ cpu_coregroup_mask, SD_SHARE_PKG_RESOURCES, SD_INIT_NAME(MC) },
+#endif
+	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ NULL, },
+};
+static void __init set_sched_topology(void)
+{
+	sched_domain_topology = arm_topology;
+}
- /*
- init_cpu_topology is called at boot when only one cpu is running
- which prevent simultaneous write access to cpu_topology array
@@ -289,4 +312,7 @@ void __init init_cpu_topology(void) smp_wmb(); parse_dt_topology();
+	/* Set scheduler topology descriptor */
+	set_sched_topology();
 }
How about the core scheduler provides an interface set_sched_topology() instead of each arch having its own __init function? Sketched out below for ARM.
-- >8 -- Subject: [PATCH] sched: set_sched_topology() as an interface of core scheduler
--- arch/arm/kernel/topology.c | 7 +------ include/linux/sched.h | 2 +- kernel/sched/core.c | 5 +++++ 3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index ae8ffbc..89d5592 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -284,11 +284,6 @@ static struct sched_domain_topology_level arm_topology[] = { { NULL, }, };
-static void __init set_sched_topology(void) -{ - sched_domain_topology = arm_topology; -} - /* * init_cpu_topology is called at boot when only one cpu is running * which prevent simultaneous write access to cpu_topology array @@ -314,5 +309,5 @@ void __init init_cpu_topology(void) parse_dt_topology();
/* Set scheduler topology descriptor */ - set_sched_topology(); + set_sched_topology(arm_topology); } diff --git a/include/linux/sched.h b/include/linux/sched.h index 8831413..fefd4e7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -998,7 +998,7 @@ struct sched_domain_topology_level { #endif };
-extern struct sched_domain_topology_level *sched_domain_topology; +extern void set_sched_topology(struct sched_domain_topology_level *tl);
#ifdef CONFIG_SCHED_DEBUG # define SD_INIT_NAME(type) .name = #type diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b4fb0df..a748c92 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6105,6 +6105,11 @@ static struct sched_domain_topology_level default_topology[] = {
struct sched_domain_topology_level *sched_domain_topology = default_topology;
+void set_sched_topology(struct sched_domain_topology_level *tl) +{ + sched_domain_topology = tl; +} + #define for_each_sd_topology(tl) \ for (tl = sched_domain_topology; tl->mask; tl++)