This patchset creates an arch_scale_freq_power function for ARM, which is used to set the relative capacity of each core of a big.LITTLE system.
Vincent Guittot (4):
  ARM: topology: Add arch_scale_freq_power function
  ARM: topology: factorize the update of sibling masks
  ARM: topology: Update cpu_power according to DT information
  sched: cpu_power: enable ARCH_POWER
 arch/arm/include/asm/topology.h |    2 +
 arch/arm/kernel/topology.c      |  203 +++++++++++++++++++++++++++++++++++----
 kernel/sched/features.h         |    2 +-
 3 files changed, 185 insertions(+), 22 deletions(-)
Add infrastructure to be able to modify the cpu_power of each core
Signed-off-by: Vincent Guittot vincent.guittot@linaro.org
---
 arch/arm/include/asm/topology.h |    2 ++
 arch/arm/kernel/topology.c      |   36 +++++++++++++++++++++++++++++++++++-
 2 files changed, 37 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84..78e4c85 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -27,11 +27,13 @@ void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
+void set_power_scale(unsigned int cpu, unsigned long power);
 #else
 
 static inline void init_cpu_topology(void) { }
 static inline void store_cpu_topology(unsigned int cpuid) { }
 
+static inline void set_power_scale(unsigned int cpu, unsigned long power) { }
 #endif
 
 #include <asm-generic/topology.h>

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 8200dea..00301a7 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -22,6 +22,35 @@
 #include <asm/cputype.h>
 #include <asm/topology.h>
 
+/*
+ * cpu power scale management
+ */
+
+/*
+ * cpu power table
+ * This per cpu data structure describes the relative capacity of each core.
+ * On a heterogeneous system, cores don't have the same computation capacity
+ * and we reflect that difference in the cpu_power field so the scheduler can
+ * take this difference into account for load balance. A per cpu structure is
+ * preferred because each cpu is mainly using its own cpu_power, even if it's
+ * not always true because of nohz_idle_balance
+ */
+static DEFINE_PER_CPU(unsigned long, cpu_scale);
+
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+	return per_cpu(cpu_scale, cpu);
+}
+
+void set_power_scale(unsigned int cpu, unsigned long power)
+{
+	per_cpu(cpu_scale, cpu) = power;
+}
+
+/*
+ * cpu topology management
+ */
+
 #define MPIDR_SMP_BITMASK (0x3 << 30)
 #define MPIDR_SMP_VALUE (0x2 << 30)
@@ -41,6 +70,9 @@
 #define MPIDR_LEVEL2_MASK 0xFF
 #define MPIDR_LEVEL2_SHIFT 16
 
+/*
+ * cpu topology table
+ */
 struct cputopo_arm cpu_topology[NR_CPUS];
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
@@ -134,7 +166,7 @@ void init_cpu_topology(void)
 {
 	unsigned int cpu;
 
-	/* init core mask */
+	/* init core mask and power */
 	for_each_possible_cpu(cpu) {
 		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
 
@@ -143,6 +175,8 @@ void init_cpu_topology(void)
 		cpu_topo->socket_id = -1;
 		cpumask_clear(&cpu_topo->core_sibling);
 		cpumask_clear(&cpu_topo->thread_sibling);
+
+		per_cpu(cpu_scale, cpu) = SCHED_POWER_SCALE;
 	}
 	smp_wmb();
 }
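For illustration, platform code could use the new hook roughly like this (a minimal sketch: the cpu numbering and the big/little split are made up, only set_power_scale() and SCHED_POWER_SCALE come from the patch):

	/* Hypothetical board code, not part of this series. */
	#include <linux/sched.h>	/* SCHED_POWER_SCALE */
	#include <asm/topology.h>	/* set_power_scale() */

	static void __init example_setup_cpu_power(void)
	{
		/* Assume cpu0/1 are big cores and cpu2/3 little ones. */
		set_power_scale(0, SCHED_POWER_SCALE);
		set_power_scale(1, SCHED_POWER_SCALE);
		set_power_scale(2, SCHED_POWER_SCALE / 2);
		set_power_scale(3, SCHED_POWER_SCALE / 2);
	}

Values above SCHED_POWER_SCALE mark a cpu as faster than the baseline and values below as slower; patch 3 derives them from the DT instead of hard-coding them.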
Hi Vincent,
On Tue, Jun 12, 2012 at 2:02 PM, Vincent Guittot vincent.guittot@linaro.org wrote:
Add infrastructure to be able to modify the cpu_power of each core
Signed-off-by: Vincent Guittot vincent.guittot@linaro.org
 arch/arm/include/asm/topology.h |    2 ++
 arch/arm/kernel/topology.c      |   36 +++++++++++++++++++++++++++++++++++-
 2 files changed, 37 insertions(+), 1 deletion(-)

diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84..78e4c85 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -27,11 +27,13 @@ void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
+void set_power_scale(unsigned int cpu, unsigned long power);
 #else
 
 static inline void init_cpu_topology(void) { }
 static inline void store_cpu_topology(unsigned int cpuid) { }
 
+static inline void set_power_scale(unsigned int cpu, unsigned long power) { }
 #endif
 
 #include <asm-generic/topology.h>

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 8200dea..00301a7 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -22,6 +22,35 @@
 #include <asm/cputype.h>
 #include <asm/topology.h>
+/*
+ * cpu power scale management
+ */
+
+/*
+ * cpu power table
+ * This per cpu data structure describes the relative capacity of each core.
+ * On a heterogeneous system, cores don't have the same computation capacity
+ * and we reflect that difference in the cpu_power field so the scheduler can
+ * take this difference into account for load balance. A per cpu structure is
+ * preferred because each cpu is mainly using its own cpu_power, even if it's
+ * not always true because of nohz_idle_balance
The end of the comment is unclear IMO; can you give more details on the relation between cpu_power and nohz_idle_balance?
Regards, Jean
On 13 June 2012 10:50, Jean Pihet jean.pihet@newoldbits.com wrote:
Hi Vincent,
On Tue, Jun 12, 2012 at 2:02 PM, Vincent Guittot vincent.guittot@linaro.org wrote:
Add infrastructure to be able to modify the cpu_power of each core
Signed-off-by: Vincent Guittot vincent.guittot@linaro.org
 arch/arm/include/asm/topology.h |    2 ++
 arch/arm/kernel/topology.c      |   36 +++++++++++++++++++++++++++++++++++-
 2 files changed, 37 insertions(+), 1 deletion(-)

diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84..78e4c85 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -27,11 +27,13 @@ void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
+void set_power_scale(unsigned int cpu, unsigned long power);
 #else
 
 static inline void init_cpu_topology(void) { }
 static inline void store_cpu_topology(unsigned int cpuid) { }
 
+static inline void set_power_scale(unsigned int cpu, unsigned long power) { }
 #endif
 
 #include <asm-generic/topology.h>

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 8200dea..00301a7 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -22,6 +22,35 @@
 #include <asm/cputype.h>
 #include <asm/topology.h>
+/*
+ * cpu power scale management
+ */
+
+/*
+ * cpu power table
+ * This per cpu data structure describes the relative capacity of each core.
+ * On a heterogeneous system, cores don't have the same computation capacity
+ * and we reflect that difference in the cpu_power field so the scheduler can
+ * take this difference into account for load balance. A per cpu structure is
+ * preferred because each cpu is mainly using its own cpu_power, even if it's
+ * not always true because of nohz_idle_balance
The end of the comment is unclear IMO; Can you give more details on the relation between cpu_power and nohz_idle_balance?
When several cores are idle, one core runs the load balance for all the idle cores. The update of the cpu_power can be done during this rebalance, so arch_scale_freq_power can be called by CPU0 to update the cpu_power of CPU1.
I was probably not clear enough in my first explanation. I will reword the comment.
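A rewording along the lines of that explanation could look like this (only a sketch of the idea, not the final comment text):

	/*
	 * cpu power table
	 * This per cpu data structure describes the relative capacity of
	 * each core. A cpu mostly uses its own cpu_power, but not always:
	 * during a nohz idle balance, one cpu runs the load balance on
	 * behalf of all the idle cpus, so e.g. CPU0 can read and update
	 * the cpu_power of CPU1.
	 */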
Regards, Vincent
Regards, Jean
On Tue, 2012-06-12 at 14:02 +0200, Vincent Guittot wrote:
 	}
 	smp_wmb();
 }
You know what.. we should make checkpatch report an error for memory barriers that don't have a comment.
I know this isn't added by this patch, but every time I see something like it I cry a little.
On Wed, Jun 13, 2012 at 02:52:49PM +0200, Peter Zijlstra wrote:
On Tue, 2012-06-12 at 14:02 +0200, Vincent Guittot wrote:
 	}
 	smp_wmb();
 }
You know what.. we should make checkpatch report an error for memory barriers that don't have a comment.
I know this isn't added by this patch, but every time I see something like it I cry a little.
Actually if it was a new line, it would have been reported, at least in theory:
# check for memory barriers without a comment.
		if ($line =~ /\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) {
			if (!ctx_has_comment($first_line, $linenr)) {
				CHK("MEMORY_BARRIER",
				    "memory barrier without comment\n" . $herecurr);
			}
		}
-apw
On Wed, 2012-06-13 at 20:27 +0100, Andy Whitcroft wrote:
On Wed, Jun 13, 2012 at 02:52:49PM +0200, Peter Zijlstra wrote:
On Tue, 2012-06-12 at 14:02 +0200, Vincent Guittot wrote:
 	}
 	smp_wmb();
 }
You know what.. we should make checkpatch report an error for memory barriers that don't have a comment.
I know this isn't added by this patch, but every time I see something like it I cry a little.
Actually if it was a new line, it would have been reported, at least in theory:
# check for memory barriers without a comment.
		if ($line =~ /\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) {
			if (!ctx_has_comment($first_line, $linenr)) {
				CHK("MEMORY_BARRIER",
				    "memory barrier without comment\n" . $herecurr);
			}
		}
Maybe promote that CHK to a WARN.
On Wed, 2012-06-13 at 20:27 +0100, Andy Whitcroft wrote:
Actually if it was a new line, it would have been reported, at least in theory:
# check for memory barriers without a comment.
		if ($line =~ /\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) {
			if (!ctx_has_comment($first_line, $linenr)) {
				CHK("MEMORY_BARRIER",
				    "memory barrier without comment\n" . $herecurr);
			}
		}
Oh sweet, we already have something for that.. But yeah I'd promote that CHK to something stronger like Joe said. Maybe even error. Memory barriers are magic, not adding proper comments should be a capital offense.
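For reference, the commented form that would keep checkpatch quiet looks like this (the comment text is only an illustration of what topology.c might say):

	/*
	 * Ensure the sibling mask updates are visible before other
	 * cpus start reading the topology tables.
	 */
	smp_wmb();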
The factorization has also been proposed in another patch that is not merged yet: http://lists.infradead.org/pipermail/linux-arm-kernel/2012-January/080873.ht... So this patch could be dropped depending on the state of the other one.
Signed-off-by: Lorenzo Pieralisi lorenzo.pieralisi@arm.com
Signed-off-by: Vincent Guittot vincent.guittot@linaro.org
---
 arch/arm/kernel/topology.c |   47 ++++++++++++++++++++++++--------------------
 1 file changed, 26 insertions(+), 21 deletions(-)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 00301a7..2f85a64 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -80,6 +80,31 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 	return &cpu_topology[cpu].core_sibling;
 }
 
+void update_siblings_masks(unsigned int cpuid)
+{
+	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+	int cpu;
+	/* update core and thread sibling masks */
+	for_each_possible_cpu(cpu) {
+		cpu_topo = &cpu_topology[cpu];
+
+		if (cpuid_topo->socket_id == cpu_topo->socket_id) {
+			cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+			if (cpu != cpuid)
+				cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+			if (cpuid_topo->core_id == cpu_topo->core_id) {
+				cpumask_set_cpu(cpuid,
+					&cpu_topo->thread_sibling);
+				if (cpu != cpuid)
+					cpumask_set_cpu(cpu,
+						&cpuid_topo->thread_sibling);
+			}
+		}
+	}
+	smp_wmb();
+}
+
 /*
  * store_cpu_topology is called at boot when only one cpu is running
 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
@@ -89,7 +114,6 @@ void store_cpu_topology(unsigned int cpuid)
 {
 	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
 	unsigned int mpidr;
-	unsigned int cpu;
 
 	/* If the cpu topology has been already set, just return */
 	if (cpuid_topo->core_id != -1)
@@ -131,26 +155,7 @@ void store_cpu_topology(unsigned int cpuid)
 		cpuid_topo->socket_id = -1;
 	}
 
-	/* update core and thread sibling masks */
-	for_each_possible_cpu(cpu) {
-		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
-
-		if (cpuid_topo->socket_id == cpu_topo->socket_id) {
-			cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
-			if (cpu != cpuid)
-				cpumask_set_cpu(cpu,
-					&cpuid_topo->core_sibling);
-
-			if (cpuid_topo->core_id == cpu_topo->core_id) {
-				cpumask_set_cpu(cpuid,
-					&cpu_topo->thread_sibling);
-				if (cpu != cpuid)
-					cpumask_set_cpu(cpu,
-						&cpuid_topo->thread_sibling);
-			}
-		}
-	}
-	smp_wmb();
+	update_siblings_masks(cpuid);
 
 	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
 		cpuid, cpu_topology[cpuid].thread_id,
Use the cpu compatible field and the clock-frequency field of the DT to estimate the capacity of each core of the system.
Signed-off-by: Vincent Guittot vincent.guittot@linaro.org
---
 arch/arm/kernel/topology.c |  122 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 122 insertions(+)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 2f85a64..0c2aee4 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -17,6 +17,7 @@
 #include <linux/percpu.h>
 #include <linux/node.h>
 #include <linux/nodemask.h>
+#include <linux/of.h>
 #include <linux/sched.h>
 
 #include <asm/cputype.h>
@@ -47,6 +48,122 @@ void set_power_scale(unsigned int cpu, unsigned long power)
 	per_cpu(cpu_scale, cpu) = power;
 }
 
+#ifdef CONFIG_OF
+struct cpu_efficiency {
+	const char *compatible;
+	unsigned long efficiency;
+};
+
+/*
+ * Table of relative efficiency of each processor
+ * The efficiency value must fit in 20 bits. The final
+ * cpu_scale value must be in the range [1:2048[.
+ * Processors that are not defined in the table
+ * use the default SCHED_POWER_SCALE value for cpu_scale.
+ */
+struct cpu_efficiency table_efficiency[] = {
+	{"arm,cortex-a15", 3891},
+	{"arm,cortex-a7",  2048},
+	{NULL, },
+};
+
+struct cpu_capacity {
+	unsigned long hwid;
+	unsigned long capacity;
+};
+
+struct cpu_capacity cpu_capacity[NR_CPUS];
+
+unsigned long middle_capacity = 1;
+
+static void __init parse_dt_topology(void)
+{
+	struct cpu_efficiency *cpu_eff;
+	struct device_node *cn = NULL;
+	unsigned long min_capacity = (unsigned long)(-1);
+	unsigned long max_capacity = 0;
+	unsigned long capacity = 0;
+	int cpu = 0;
+
+	while ((cn = of_find_node_by_type(cn, "cpu"))) {
+		const u32 *rate, *reg;
+		char *compatible;
+		int len;
+
+		if (cpu >= num_possible_cpus())
+			break;
+
+		compatible = of_get_property(cn, "compatible", &len);
+
+		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
+			if (of_device_is_compatible(cn, cpu_eff->compatible))
+				break;
+
+		if (cpu_eff->compatible == NULL)
+			continue;
+
+		rate = of_get_property(cn, "clock-frequency", &len);
+		if (!rate || len != 4) {
+			pr_err("%s missing clock-frequency property\n",
+				cn->full_name);
+			continue;
+		}
+
+		reg = of_get_property(cn, "reg", &len);
+		if (!reg || len != 4) {
+			pr_err("%s missing reg property\n", cn->full_name);
+			continue;
+		}
+
+		capacity = ((be32_to_cpup(rate)) >> 20)
+			* cpu_eff->efficiency;
+
+		/* Save min capacity of the system */
+		if (capacity < min_capacity)
+			min_capacity = capacity;
+
+		/* Save max capacity of the system */
+		if (capacity > max_capacity)
+			max_capacity = capacity;
+
+		cpu_capacity[cpu].capacity = capacity;
+		cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
+	}
+
+	if (cpu < num_possible_cpus())
+		cpu_capacity[cpu].hwid = (unsigned long)(-1);
+
+	middle_capacity = (min_capacity + max_capacity) >> 11;
+}
+
+void update_cpu_power(unsigned int cpu, unsigned long hwid)
+{
+	unsigned int idx = 0;
+
+	/* look for the cpu's hwid in the cpu capacity table */
+	for (idx = 0; idx < num_possible_cpus(); idx++) {
+		if (cpu_capacity[idx].hwid == hwid)
+			break;
+
+		if (cpu_capacity[idx].hwid == -1)
+			return;
+	}
+
+	if (idx == num_possible_cpus())
+		return;
+
+	set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);
+
+	printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
+		cpu, arch_scale_freq_power(NULL, cpu));
+}
+
+#else
+static inline void parse_dt_topology(void) {}
+static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
+#endif
+
 /*
  * cpu topology management
  */
@@ -60,6 +177,7 @@ void set_power_scale(unsigned int cpu, unsigned long power)
  * These masks reflect the current use of the affinity levels.
  * The affinity level can be up to 16 bits according to ARM ARM
  */
+#define MPIDR_HWID_BITMASK 0xFFFFFF
 
 #define MPIDR_LEVEL0_MASK 0x3
 #define MPIDR_LEVEL0_SHIFT 0
@@ -157,6 +275,8 @@ void store_cpu_topology(unsigned int cpuid)
 
 	update_siblings_masks(cpuid);
 
+	update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);
+
 	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
 		cpuid, cpu_topology[cpuid].thread_id,
 		cpu_topology[cpuid].core_id,
@@ -184,4 +304,6 @@ void init_cpu_topology(void)
 		per_cpu(cpu_scale, cpu) = SCHED_POWER_SCALE;
 	}
 	smp_wmb();
+
+	parse_dt_topology();
 }
Vincent,
On Tue, Jun 12, 2012 at 2:02 PM, Vincent Guittot vincent.guittot@linaro.org wrote:
Use the cpu compatible field and the clock-frequency field of the DT to estimate the capacity of each core of the system.
Signed-off-by: Vincent Guittot vincent.guittot@linaro.org
 arch/arm/kernel/topology.c |  122 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 122 insertions(+)

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 2f85a64..0c2aee4 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -17,6 +17,7 @@
 #include <linux/percpu.h>
 #include <linux/node.h>
 #include <linux/nodemask.h>
+#include <linux/of.h>
 #include <linux/sched.h>
 
 #include <asm/cputype.h>
@@ -47,6 +48,122 @@ void set_power_scale(unsigned int cpu, unsigned long power)
 	per_cpu(cpu_scale, cpu) = power;
 }
+#ifdef CONFIG_OF
+struct cpu_efficiency {
+	const char *compatible;
+	unsigned long efficiency;
+};
+
+/*
+ * Table of relative efficiency of each processor
+ * The efficiency value must fit in 20 bits. The final
+ * cpu_scale value must be in the range [1:2048[.
Typo here.
+ * Processors that are not defined in the table
+ * use the default SCHED_POWER_SCALE value for cpu_scale.
+ */
+struct cpu_efficiency table_efficiency[] = {
+	{"arm,cortex-a15", 3891},
+	{"arm,cortex-a7",  2048},
How are those results measured or computed? Is this purely related to the number crunching performance?
Also, more generally, what if the core frequencies are changing?
Regards, Jean
On 13 June 2012 10:59, Jean Pihet jean.pihet@newoldbits.com wrote:
Vincent,
On Tue, Jun 12, 2012 at 2:02 PM, Vincent Guittot vincent.guittot@linaro.org wrote:
Use the cpu compatible field and the clock-frequency field of the DT to estimate the capacity of each core of the system.
Signed-off-by: Vincent Guittot vincent.guittot@linaro.org
 arch/arm/kernel/topology.c |  122 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 122 insertions(+)

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 2f85a64..0c2aee4 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -17,6 +17,7 @@
 #include <linux/percpu.h>
 #include <linux/node.h>
 #include <linux/nodemask.h>
+#include <linux/of.h>
 #include <linux/sched.h>
 
 #include <asm/cputype.h>
@@ -47,6 +48,122 @@ void set_power_scale(unsigned int cpu, unsigned long power)
 	per_cpu(cpu_scale, cpu) = power;
 }
+#ifdef CONFIG_OF
+struct cpu_efficiency {
+	const char *compatible;
+	unsigned long efficiency;
+};
+
+/*
+ * Table of relative efficiency of each processor
+ * The efficiency value must fit in 20 bits. The final
+ * cpu_scale value must be in the range [1:2048[.
Typo here.
I realize that I have used an absolute value instead of SCHED_POWER_SCALE. The cpu_scale value must be in the range 0 < cpu_scale < 2*SCHED_POWER_SCALE.
+ * Processors that are not defined in the table
+ * use the default SCHED_POWER_SCALE value for cpu_scale.
+ */
+struct cpu_efficiency table_efficiency[] = {
+	{"arm,cortex-a15", 3891},
+	{"arm,cortex-a7",  2048},
How are those results measured or computed? Is this purely related to the number crunching performance?
These values are based on ARM's figures, which say that a Cortex-A15 is 1.9x faster than a Cortex-A7 at the same frequency. So the inputs are ARM's figures. Then, the absolute values are arbitrary, with the constraint of being large enough for precision and small enough to keep the computation in an unsigned long.
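To make the arithmetic concrete, here is a worked example assuming both clusters run at 1 GHz (the frequency is purely illustrative; the formula and the constants come from the patch, and all divisions are integer divisions):

	capacity = (clock-frequency >> 20) * efficiency

	A7 : (1000000000 >> 20) * 2048 = 953 * 2048 = 1951744	(min)
	A15: (1000000000 >> 20) * 3891 = 953 * 3891 = 3708123	(max)

	middle_capacity = (1951744 + 3708123) >> 11 = 2763

	cpu_scale(A7)  = 1951744 / 2763 =  706
	cpu_scale(A15) = 3708123 / 2763 = 1342

Both values land in 0 < cpu_scale < 2 * SCHED_POWER_SCALE, and their ratio is the expected ~1.9.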
Also, more generally, what if the core frequencies are changing?
For now, the scheduler takes into account the maximum capacity of a core when it checks the load balance of the system.
Regards, Vincent
Regards, Jean
On Wed, Jun 13, 2012 at 3:14 PM, Vincent Guittot vincent.guittot@linaro.org wrote:
On 13 June 2012 10:59, Jean Pihet jean.pihet@newoldbits.com wrote:
Vincent,
On Tue, Jun 12, 2012 at 2:02 PM, Vincent Guittot vincent.guittot@linaro.org wrote:
Use the cpu compatible field and the clock-frequency field of the DT to estimate the capacity of each core of the system.
Signed-off-by: Vincent Guittot vincent.guittot@linaro.org
 arch/arm/kernel/topology.c |  122 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 122 insertions(+)

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 2f85a64..0c2aee4 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -17,6 +17,7 @@
 #include <linux/percpu.h>
 #include <linux/node.h>
 #include <linux/nodemask.h>
+#include <linux/of.h>
 #include <linux/sched.h>
 
 #include <asm/cputype.h>
@@ -47,6 +48,122 @@ void set_power_scale(unsigned int cpu, unsigned long power)
 	per_cpu(cpu_scale, cpu) = power;
 }
+#ifdef CONFIG_OF
+struct cpu_efficiency {
+	const char *compatible;
+	unsigned long efficiency;
+};
+
+/*
+ * Table of relative efficiency of each processor
+ * The efficiency value must fit in 20 bits. The final
+ * cpu_scale value must be in the range [1:2048[.
Typo here.
I realize that I have used an absolute value instead of SCHED_POWER_SCALE. The cpu_scale value must be in the range 0 < cpu_scale < 2*SCHED_POWER_SCALE.
+ * Processors that are not defined in the table
+ * use the default SCHED_POWER_SCALE value for cpu_scale.
+ */
+struct cpu_efficiency table_efficiency[] = {
+	{"arm,cortex-a15", 3891},
+	{"arm,cortex-a7",  2048},
How are those results measured or computed? Is this purely related to the number crunching performance?
These values are based on ARM's figures, which say that a Cortex-A15 is 1.9x faster than a Cortex-A7 at the same frequency. So the inputs are ARM's figures. Then, the absolute values are arbitrary, with the constraint of being large enough for precision and small enough to keep the computation in an unsigned long.
Also, more generally, what if the core frequencies are changing?
For now, the scheduler takes into account the maximum capacity of a core when it checks the load balance of the system.
Jean,
Various discussions around power-aware scheduling have amplified the need for the scheduler to have some knowledge of DVFS. This would then require the scheduler to track 'cpu_power' ( = max power) and perhaps a new variable 'current_power' that is changed by the DVFS framework.
The first goal, though, is to make sure that the scheduler can handle different cpu_power values due to asymmetric cores.
/Amit
On Wed, 2012-06-13 at 18:14 +0530, Amit Kucheria wrote:
Various discussions around power-aware scheduling have amplified the need for the scheduler to have some knowledge of DVFS. This would then require the scheduler to track 'cpu_power' ( = max power) and perhaps a new variable 'current_power' that is changed by the DVFS framework.
Note that capacity is in fact a better term -- not to be confused with the capacity as currently in use within the load-balancer. Luckily there's no way to read that and not be confused.. uhmm :-)
The first goal, though, is to make sure that the scheduler can handle different cpu_power values due to asymmetric cores.
I would think the very first goal was to do a simple packing balancer before doing silly things with asymmetric setups.. but what do I know.
Hi Amit, Peter,
On Wed, Jun 13, 2012 at 2:47 PM, Peter Zijlstra peterz@infradead.org wrote:
On Wed, 2012-06-13 at 18:14 +0530, Amit Kucheria wrote:
Various discussions around power-aware scheduling have amplified the need for the scheduler to have some knowledge of DVFS. This would then require the scheduler to track 'cpu_power' ( = max power) and perhaps a new variable 'current_power' that is changed by the DVFS framework.
Note that capacity is in fact a better term -- not to be confused with the capacity as currently in use within the load-balancer. Luckily there's no way to read that and not be confused.. uhmm :-)
The first goal, though, is to make sure that the scheduler can handle different cpu_power values due to asymmetric cores.
I would think the very first goal was to do a simple packing balancer before doing silly things with asymmetric setups.. but what do I know.
Sure! First things first ;p
Thanks for the details!
Regards, Jean
On 13 June 2012 14:47, Peter Zijlstra peterz@infradead.org wrote:
On Wed, 2012-06-13 at 18:14 +0530, Amit Kucheria wrote:
Various discussions around power-aware scheduling have amplified the need for the scheduler to have some knowledge of DVFS. This would then require the scheduler to track 'cpu_power' ( = max power) and perhaps a new variable 'current_power' that is changed by the DVFS framework.
Note that capacity is in fact a better term -- not to be confused with the capacity as currently in use within the load-balancer. Luckily there's no way to read that and not be confused.. uhmm :-)
The first goal, though, is to make sure that the scheduler can handle different cpu_power values due to asymmetric cores.
I would think the very first goal was to do a simple packing balancer before doing silly things with asymmetric setups.. but what do I know.
In fact, there are two tracks: one for a power-aware scheduler and one for setting the capacity of each core of a big.LITTLE system. This patch is for the latter and is not directly linked to power, because the default/performance mode of the scheduler should also take advantage of this information.
On Wed, 2012-06-13 at 15:29 +0200, Vincent Guittot wrote:
In fact, there are two tracks: one for a power-aware scheduler and one for setting the capacity of each core of a big.LITTLE system. This patch is for the latter and is not directly linked to power, because the default/performance mode of the scheduler should also take advantage of this information.
Fair enough.. can you include a little blurb in the changelog on how this affects the capacity stuff for big.LITTLE and if that works as desired due to these patches?
On 13 June 2012 15:32, Peter Zijlstra peterz@infradead.org wrote:
On Wed, 2012-06-13 at 15:29 +0200, Vincent Guittot wrote:
In fact, there are two tracks: one for a power-aware scheduler and one for setting the capacity of each core of a big.LITTLE system. This patch is for the latter and is not directly linked to power, because the default/performance mode of the scheduler should also take advantage of this information.
Fair enough.. can you include a little blurb in the changelog on how this affects the capacity stuff for big.LITTLE and if that works as desired due to these patches?
OK, I will add explanations about how this affects the cpu_power field and the load balance of a big.LITTLE system, what is enhanced with this modification and what is not.
On Tue, 2012-06-12 at 14:02 +0200, Vincent Guittot wrote:
+struct cpu_capacity cpu_capacity[NR_CPUS];
I know ARM isn't likely to suffer from the 4k cpu issue, but is there a reason to use a NR_CPUS array over a per-cpu variable?
On 13 June 2012 15:07, Peter Zijlstra peterz@infradead.org wrote:
On Tue, 2012-06-12 at 14:02 +0200, Vincent Guittot wrote:
+struct cpu_capacity cpu_capacity[NR_CPUS];
I know ARM isn't likely to suffer from the 4k cpu issue, but is there a reason to use a NR_CPUS array over a per-cpu variable?
At this stage, we don't know which logical CPU will match which hwid. During the boot of each CPU, we parse the table to find an efficiency value for the booting CPU. The whole table is used by one CPU at a time.
On Wed, 2012-06-13 at 16:54 +0200, Vincent Guittot wrote:
On 13 June 2012 15:07, Peter Zijlstra peterz@infradead.org wrote:
On Tue, 2012-06-12 at 14:02 +0200, Vincent Guittot wrote:
+struct cpu_capacity cpu_capacity[NR_CPUS];
I know ARM isn't likely to suffer from the 4k cpu issue, but is there a reason to use a NR_CPUS array over a per-cpu variable?
At this stage, we don't know which logical CPU will match which hwid. During the boot of each CPU, we parse the table to find an efficiency value for the booting CPU. The whole table is used by one CPU at a time.
It's not so much the usage as the dynamic sizing that I was after. NR_CPUS will always be the max size, whereas per-cpu data will only use the amount of storage required to back the number of cpus present.
For this reason we've spent a great deal of effort removing NR_CPUS-sized arrays all over the core (and x86/ia64 arch) code, since distros now build with NR_CPUS=4096 but hardly anybody has that many cpus, so arrays sized that way waste tons of resources.
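The shape being suggested here would be something like the following sketch (it would still need an answer to the hwid lookup Vincent describes above, since the table is filled before the logical cpu numbering is known):

	struct cpu_capacity {
		unsigned long hwid;
		unsigned long capacity;
	};

	/* Sized for the possible cpus rather than for NR_CPUS. */
	static DEFINE_PER_CPU(struct cpu_capacity, cpu_capacity);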
On Wed, 13 Jun 2012, Peter Zijlstra wrote:
On Tue, 2012-06-12 at 14:02 +0200, Vincent Guittot wrote:
+#ifdef CONFIG_OF
That must really be the worst CONFIG_ name ever..
Indeed! We must corner Grant to do a s/CONFIG_OF/CONFIG_DEVICE_TREE/ on the whole tree and send the patch to Linus. He just managed to chicken away from doing it so far.
Nicolas
On Tue, 2012-06-12 at 14:02 +0200, Vincent Guittot wrote:
Use cpu compatibility field and clock-frequency field of DT to estimate the capacity of each core of the system
Can you provide a little description in the form of a code comment near update_cpu_power() as to wtf all that does? This OF nonsense is unreadable.
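A comment along these lines would summarize what the function does (a sketch based on the code in the patch, not proposed wording from the author):

	/*
	 * Look up the cpu's hwid (taken from the MPIDR) in the table that
	 * parse_dt_topology() filled in from the DT cpu nodes. If an entry
	 * is found, set this cpu's cpu_power to its pre-computed capacity
	 * divided by middle_capacity; otherwise keep the default
	 * SCHED_POWER_SCALE.
	 */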
Heterogeneous ARM platforms use the arch_scale_freq_power function to reflect the relative capacity of each core.
Signed-off-by: Vincent Guittot vincent.guittot@linaro.org
---
 kernel/sched/features.h |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index de00a48..d98ae90 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -42,7 +42,7 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true)
 /*
  * Use arch dependent cpu power functions
  */
-SCHED_FEAT(ARCH_POWER, false)
+SCHED_FEAT(ARCH_POWER, true)
 
 SCHED_FEAT(HRTICK, false)
 SCHED_FEAT(DOUBLE_TICK, false)
On Tue, 2012-06-12 at 14:02 +0200, Vincent Guittot wrote:
Heterogeneous ARM platforms use the arch_scale_freq_power function to reflect the relative capacity of each core.
I think I've pointed out before that this breaks x86.. you need a patch killing all that stuff before this.
Signed-off-by: Vincent Guittot vincent.guittot@linaro.org
 kernel/sched/features.h |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index de00a48..d98ae90 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -42,7 +42,7 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true)
 /*
  * Use arch dependent cpu power functions
  */
-SCHED_FEAT(ARCH_POWER, false)
+SCHED_FEAT(ARCH_POWER, true)
 
 SCHED_FEAT(HRTICK, false)
 SCHED_FEAT(DOUBLE_TICK, false)
On 13 June 2012 14:50, Peter Zijlstra peterz@infradead.org wrote:
On Tue, 2012-06-12 at 14:02 +0200, Vincent Guittot wrote:
Heterogeneous ARM platforms use the arch_scale_freq_power function to reflect the relative capacity of each core.
I think I've pointed out before that this breaks x86.. you need a patch killing all that stuff before this.
In v3.4, x86 hasn't got any specific declaration of arch_scale_freq_power, so it would now use the weak arch_scale_freq_power, which calls default_scale_freq_power. Isn't that enough?
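For context, the generic fallback in the scheduler looks roughly like this (quoted from memory, so treat it as a sketch rather than the exact v3.4 source):

	unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
	{
		return default_scale_freq_power(sd, cpu);
	}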
Signed-off-by: Vincent Guittot vincent.guittot@linaro.org
 kernel/sched/features.h |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index de00a48..d98ae90 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -42,7 +42,7 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true)
 /*
  * Use arch dependent cpu power functions
  */
-SCHED_FEAT(ARCH_POWER, false)
+SCHED_FEAT(ARCH_POWER, true)
 
 SCHED_FEAT(HRTICK, false)
 SCHED_FEAT(DOUBLE_TICK, false)
On Wed, 2012-06-13 at 15:20 +0200, Vincent Guittot wrote:
In v3.4, x86 hasn't got any specific declaration of arch_scale_freq_power, so it would now use the weak arch_scale_freq_power, which calls default_scale_freq_power. Isn't that enough?
---
Subject: sched, x86: Remove broken power estimation
From: Peter Zijlstra a.p.zijlstra@chello.nl
Date: Wed Jun 13 15:24:45 CEST 2012
The x86 sched power implementation has been broken forever and gets in the way of other stuff, remove it.
For archaeological interest, fixing this code would require dealing with the cross-cpu calling of these functions and more importantly, we need to filter idle time out of the a/m-perf stuff because the ratio will go down to 0 when idle, giving a 0 capacity which is not what we'd want.
Signed-off-by: Peter Zijlstra a.p.zijlstra@chello.nl
Link: http://lkml.kernel.org/n/tip-wjjwelpti8f8k7i1pdnzmdr8@git.kernel.org
---
 arch/x86/kernel/cpu/Makefile |    2 -
 arch/x86/kernel/cpu/sched.c  |   55 -------------------------------------------
 2 files changed, 1 insertion(+), 56 deletions(-)

--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,7 +14,7 @@ CFLAGS_common.o := $(nostackp)
 
 obj-y			:= intel_cacheinfo.o scattered.o topology.o
 obj-y			+= proc.o capflags.o powerflags.o common.o
-obj-y			+= vmware.o hypervisor.o sched.o mshyperv.o
+obj-y			+= vmware.o hypervisor.o mshyperv.o
 obj-y			+= rdrand.o
 obj-y			+= match.o
 
--- a/arch/x86/kernel/cpu/sched.c
+++ /dev/null
@@ -1,55 +0,0 @@
-#include <linux/sched.h>
-#include <linux/math64.h>
-#include <linux/percpu.h>
-#include <linux/irqflags.h>
-
-#include <asm/cpufeature.h>
-#include <asm/processor.h>
-
-#ifdef CONFIG_SMP
-
-static DEFINE_PER_CPU(struct aperfmperf, old_perf_sched);
-
-static unsigned long scale_aperfmperf(void)
-{
-	struct aperfmperf val, *old = &__get_cpu_var(old_perf_sched);
-	unsigned long ratio, flags;
-
-	local_irq_save(flags);
-	get_aperfmperf(&val);
-	local_irq_restore(flags);
-
-	ratio = calc_aperfmperf_ratio(old, &val);
-	*old = val;
-
-	return ratio;
-}
-
-unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
-{
-	/*
-	 * do aperf/mperf on the cpu level because it includes things
-	 * like turbo mode, which are relevant to full cores.
-	 */
-	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
-		return scale_aperfmperf();
-
-	/*
-	 * maybe have something cpufreq here
-	 */
-
-	return default_scale_freq_power(sd, cpu);
-}
-
-unsigned long arch_scale_smt_power(struct sched_domain *sd, int cpu)
-{
-	/*
-	 * aperf/mperf already includes the smt gain
-	 */
-	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
-		return SCHED_LOAD_SCALE;
-
-	return default_scale_smt_power(sd, cpu);
-}
-
-#endif
On 13 June 2012 15:28, Peter Zijlstra peterz@infradead.org wrote:
On Wed, 2012-06-13 at 15:20 +0200, Vincent Guittot wrote:
In v3.4, x86 hasn't got any specific declaration of arch_scale_freq_power, so it would now use the weak arch_scale_freq_power, which calls default_scale_freq_power. Isn't that enough?
Subject: sched, x86: Remove broken power estimation
From: Peter Zijlstra a.p.zijlstra@chello.nl
Date: Wed Jun 13 15:24:45 CEST 2012
The x86 sched power implementation has been broken forever and gets in the way of other stuff, remove it.
For archaeological interest, fixing this code would require dealing with the cross-cpu calling of these functions and more importantly, we need to filter idle time out of the a/m-perf stuff because the ratio will go down to 0 when idle, giving a 0 capacity which is not what we'd want.
Signed-off-by: Peter Zijlstra a.p.zijlstra@chello.nl
Link: http://lkml.kernel.org/n/tip-wjjwelpti8f8k7i1pdnzmdr8@git.kernel.org
 arch/x86/kernel/cpu/Makefile |    2 -
 arch/x86/kernel/cpu/sched.c  |   55 -------------------------------------------
 2 files changed, 1 insertion(+), 56 deletions(-)

--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,7 +14,7 @@ CFLAGS_common.o := $(nostackp)
 
 obj-y			:= intel_cacheinfo.o scattered.o topology.o
 obj-y			+= proc.o capflags.o powerflags.o common.o
-obj-y			+= vmware.o hypervisor.o sched.o mshyperv.o
+obj-y			+= vmware.o hypervisor.o mshyperv.o
 obj-y			+= rdrand.o
 obj-y			+= match.o
--- a/arch/x86/kernel/cpu/sched.c
+++ /dev/null
@@ -1,55 +0,0 @@
-#include <linux/sched.h>
-#include <linux/math64.h>
-#include <linux/percpu.h>
-#include <linux/irqflags.h>
-
-#include <asm/cpufeature.h>
-#include <asm/processor.h>
-
-#ifdef CONFIG_SMP
-
-static DEFINE_PER_CPU(struct aperfmperf, old_perf_sched);
-
-static unsigned long scale_aperfmperf(void)
-{
-	struct aperfmperf val, *old = &__get_cpu_var(old_perf_sched);
-	unsigned long ratio, flags;
-
-	local_irq_save(flags);
-	get_aperfmperf(&val);
-	local_irq_restore(flags);
-
-	ratio = calc_aperfmperf_ratio(old, &val);
-	*old = val;
-
-	return ratio;
-}
-
-unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
-{
-	/*
-	 * do aperf/mperf on the cpu level because it includes things
-	 * like turbo mode, which are relevant to full cores.
-	 */
-	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
-		return scale_aperfmperf();
-
-	/*
-	 * maybe have something cpufreq here
-	 */
-
-	return default_scale_freq_power(sd, cpu);
-}
-
-unsigned long arch_scale_smt_power(struct sched_domain *sd, int cpu)
-{
-	/*
-	 * aperf/mperf already includes the smt gain
-	 */
-	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
-		return SCHED_LOAD_SCALE;
-
-	return default_scale_smt_power(sd, cpu);
-}
-
-#endif
Sorry for the misses, I need to update my tags because this has been filtered.