This patchset adds an arch_scale_freq_power function for ARM, which is used to set the relative capacity of each core of a big.LITTLE system. It also removes the broken power estimation on x86.
Modifications since v2:
- Make the set_power_scale function static
- Rework the loop in update_siblings_masks
- Remove useless code in parse_dt_topology

Modifications since v1:
- Add and update the explanation about the use of the table and the range
  of the values
- Remove the use of NR_CPUS and use nr_cpu_ids instead
- Remove the broken power estimation of x86
Peter Zijlstra (1):
  sched, x86: Remove broken power estimation

Vincent Guittot (4):
  ARM: topology: Add arch_scale_freq_power function
  ARM: topology: factorize the update of sibling masks
  ARM: topology: Update cpu_power according to DT information
  sched: cpu_power: enable ARCH_POWER

 arch/arm/kernel/topology.c   | 209 ++++++++++++++++++++++++++++++++++++-----
 arch/x86/kernel/cpu/Makefile |   2 +-
 arch/x86/kernel/cpu/sched.c  |  55 ----------
 kernel/sched/features.h      |   2 +-
 4 files changed, 189 insertions(+), 79 deletions(-)
 delete mode 100644 arch/x86/kernel/cpu/sched.c
Add infrastructure to be able to modify the cpu_power of each core
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 arch/arm/kernel/topology.c | 38 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 37 insertions(+), 1 deletion(-)

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 8200dea..51f23b3 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -22,6 +22,37 @@
 #include <asm/cputype.h>
 #include <asm/topology.h>
+/*
+ * cpu power scale management
+ */
+
+/*
+ * cpu power table
+ * This per cpu data structure describes the relative capacity of each core.
+ * On a heterogeneous system, cores don't have the same computation capacity
+ * and we reflect that difference in the cpu_power field so the scheduler can
+ * take this difference into account during load balance. A per cpu structure
+ * is preferred because each CPU updates its own cpu_power field during the
+ * load balance except for idle cores. One idle core is selected to run the
+ * rebalance_domains for all idle cores and the cpu_power can be updated
+ * during this sequence.
+ */
+static DEFINE_PER_CPU(unsigned long, cpu_scale);
+
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+	return per_cpu(cpu_scale, cpu);
+}
+
+static void set_power_scale(unsigned int cpu, unsigned long power)
+{
+	per_cpu(cpu_scale, cpu) = power;
+}
+
+/*
+ * cpu topology management
+ */
+
 #define MPIDR_SMP_BITMASK (0x3 << 30)
 #define MPIDR_SMP_VALUE (0x2 << 30)
@@ -41,6 +72,9 @@
 #define MPIDR_LEVEL2_MASK 0xFF
 #define MPIDR_LEVEL2_SHIFT 16

+/*
+ * cpu topology table
+ */
 struct cputopo_arm cpu_topology[NR_CPUS];

 const struct cpumask *cpu_coregroup_mask(int cpu)
@@ -134,7 +168,7 @@ void init_cpu_topology(void)
 {
 	unsigned int cpu;

-	/* init core mask */
+	/* init core mask and power */
 	for_each_possible_cpu(cpu) {
 		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

@@ -143,6 +177,8 @@ void init_cpu_topology(void)
 		cpu_topo->socket_id = -1;
 		cpumask_clear(&cpu_topo->core_sibling);
 		cpumask_clear(&cpu_topo->thread_sibling);
+
+		set_power_scale(cpu, SCHED_POWER_SCALE);
 	}
 	smp_wmb();
 }
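With every cpu_scale entry initialised to SCHED_POWER_SCALE, the new hook is behaviour-neutral until set_power_scale() is called with a different value, as a later patch in this series does from DT data. A minimal user-space model of the mechanism (a sketch, not kernel code; the value 1342 is a hypothetical big-core power):

#include <stdio.h>

#define SCHED_POWER_SCALE 1024UL	/* nominal capacity of one cpu */
#define NR_CPUS 4

/* stand-in for the per-cpu cpu_scale variable in the patch */
static unsigned long cpu_scale[NR_CPUS];

static unsigned long arch_scale_freq_power(int cpu)
{
	return cpu_scale[cpu];
}

static void set_power_scale(unsigned int cpu, unsigned long power)
{
	cpu_scale[cpu] = power;
}

int main(void)
{
	int cpu;

	/* boot default, as in init_cpu_topology(): behaviour-neutral */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		set_power_scale(cpu, SCHED_POWER_SCALE);

	/* a later caller (e.g. DT parsing) can raise one core's power */
	set_power_scale(2, 1342);	/* hypothetical big core */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d power %lu\n", cpu, arch_scale_freq_power(cpu));
	return 0;
}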
On Wed, 20 Jun 2012 17:19:18 +0200, Vincent Guittot wrote:
> Add infrastructure to be able to modify the cpu_power of each core
>
> Signed-off-by: Vincent Guittot <vincent.guittot-QSEj5FYQhm4dnm+yROfE0A@public.gmane.org>
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Thanks.
This factorization has also been proposed in another patchset that has not been merged yet: http://lists.infradead.org/pipermail/linux-arm-kernel/2012-January/080873.ht... So, this patch could be dropped depending on the state of the other one.
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 arch/arm/kernel/topology.c | 48 +++++++++++++++++++++++++-------------------
 1 file changed, 27 insertions(+), 21 deletions(-)

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 51f23b3..eb5fc81 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -82,6 +82,32 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 	return &cpu_topology[cpu].core_sibling;
 }

+void update_siblings_masks(unsigned int cpuid)
+{
+	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+	int cpu;
+
+	/* update core and thread sibling masks */
+	for_each_possible_cpu(cpu) {
+		cpu_topo = &cpu_topology[cpu];
+
+		if (cpuid_topo->socket_id != cpu_topo->socket_id)
+			continue;
+
+		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+		if (cpu != cpuid)
+			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+		if (cpuid_topo->core_id != cpu_topo->core_id)
+			continue;
+
+		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
+		if (cpu != cpuid)
+			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+	}
+	smp_wmb();
+}
+
 /*
  * store_cpu_topology is called at boot when only one cpu is running
  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
@@ -91,7 +117,6 @@ void store_cpu_topology(unsigned int cpuid)
 {
 	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
 	unsigned int mpidr;
-	unsigned int cpu;

 	/* If the cpu topology has been already set, just return */
 	if (cpuid_topo->core_id != -1)
@@ -133,26 +158,7 @@ void store_cpu_topology(unsigned int cpuid)
 		cpuid_topo->socket_id = -1;
 	}

-	/* update core and thread sibling masks */
-	for_each_possible_cpu(cpu) {
-		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
-
-		if (cpuid_topo->socket_id == cpu_topo->socket_id) {
-			cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
-			if (cpu != cpuid)
-				cpumask_set_cpu(cpu,
-					&cpuid_topo->core_sibling);
-
-			if (cpuid_topo->core_id == cpu_topo->core_id) {
-				cpumask_set_cpu(cpuid,
-					&cpu_topo->thread_sibling);
-				if (cpu != cpuid)
-					cpumask_set_cpu(cpu,
-						&cpuid_topo->thread_sibling);
-			}
-		}
-	}
-	smp_wmb();
+	update_siblings_masks(cpuid);

 	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
 		cpuid, cpu_topology[cpuid].thread_id,
On Wed, 20 Jun 2012 17:19:19 +0200, Vincent Guittot wrote:
> This factorization has also been proposed in another patchset that has
> not been merged yet:
> http://lists.infradead.org/pipermail/linux-arm-kernel/2012-January/080873.ht...
> So, this patch could be dropped depending on the state of the other one.
>
> Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>

Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Use the cpu compatible field and clock-frequency field of the DT to estimate the capacity of each core of the system and to update the cpu_power field accordingly. This patch makes it possible to put more running tasks on big cores than on LITTLE ones, but it doesn't ensure that long-running tasks will run on big cores and short ones on LITTLE cores.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 arch/arm/kernel/topology.c | 123 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 123 insertions(+)

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index eb5fc81..f59193c 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -17,7 +17,9 @@
 #include <linux/percpu.h>
 #include <linux/node.h>
 #include <linux/nodemask.h>
+#include <linux/of.h>
 #include <linux/sched.h>
+#include <linux/slab.h>

 #include <asm/cputype.h>
 #include <asm/topology.h>
@@ -49,6 +51,122 @@ static void set_power_scale(unsigned int cpu, unsigned long power)
 	per_cpu(cpu_scale, cpu) = power;
 }

+#ifdef CONFIG_OF
+struct cpu_efficiency {
+	const char *compatible;
+	unsigned long efficiency;
+};
+
+/*
+ * Table of relative efficiency of each processors
+ * The efficiency value must fit in 20bit. The final
+ * cpu_scale value must be in the range
+ * 0 < cpu_scale < 2*SCHED_POWER_SCALE.
+ * Processors that are not defined in the table,
+ * use the default SCHED_POWER_SCALE value for cpu_scale.
+ */
+struct cpu_efficiency table_efficiency[] = {
+	{"arm,cortex-a15", 3891},
+	{"arm,cortex-a7",  2048},
+	{NULL, },
+};
+
+struct cpu_capacity {
+	unsigned long hwid;
+	unsigned long capacity;
+};
+
+struct cpu_capacity *cpu_capacity;
+
+unsigned long middle_capacity = 1;
+
+static void __init parse_dt_topology(void)
+{
+	struct cpu_efficiency *cpu_eff;
+	struct device_node *cn = NULL;
+	unsigned long min_capacity = (unsigned long)(-1);
+	unsigned long max_capacity = 0;
+	unsigned long capacity = 0;
+	int alloc_size, cpu = 0;
+
+	alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
+	cpu_capacity = (struct cpu_capacity *)kzalloc(alloc_size, GFP_NOWAIT);
+
+	while ((cn = of_find_node_by_type(cn, "cpu"))) {
+		const u32 *rate, *reg;
+		int len;
+
+		if (cpu >= num_possible_cpus())
+			break;
+
+		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
+			if (of_device_is_compatible(cn, cpu_eff->compatible))
+				break;
+
+		if (cpu_eff->compatible == NULL)
+			continue;
+
+		rate = of_get_property(cn, "clock-frequency", &len);
+		if (!rate || len != 4) {
+			pr_err("%s missing clock-frequency property\n",
+				cn->full_name);
+			continue;
+		}
+
+		reg = of_get_property(cn, "reg", &len);
+		if (!reg || len != 4) {
+			pr_err("%s missing reg property\n", cn->full_name);
+			continue;
+		}
+
+		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+
+		/* Save min capacity of the system */
+		if (capacity < min_capacity)
+			min_capacity = capacity;
+
+		/* Save max capacity of the system */
+		if (capacity > max_capacity)
+			max_capacity = capacity;
+
+		cpu_capacity[cpu].capacity = capacity;
+		cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
+	}
+
+	if (cpu < num_possible_cpus())
+		cpu_capacity[cpu].hwid = (unsigned long)(-1);
+
+	middle_capacity = (min_capacity + max_capacity) >> 11;
+}
+
+void update_cpu_power(unsigned int cpu, unsigned long hwid)
+{
+	unsigned int idx = 0;
+
+	/* look for the cpu's hwid in the cpu capacity table */
+	for (idx = 0; idx < num_possible_cpus(); idx++) {
+		if (cpu_capacity[idx].hwid == hwid)
+			break;

+		if (cpu_capacity[idx].hwid == -1)
+			return;
+	}
+
+	if (idx == num_possible_cpus())
+		return;
+
+	set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);
+
+	printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
+		cpu, arch_scale_freq_power(NULL, cpu));
+}
+
+#else
+static inline void parse_dt_topology(void) {}
+static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
+#endif
+
+
 /*
  * cpu topology management
  */
@@ -62,6 +180,7 @@ static void set_power_scale(unsigned int cpu, unsigned long power)
  * These masks reflect the current use of the affinity levels.
  * The affinity level can be up to 16 bits according to ARM ARM
  */
+#define MPIDR_HWID_BITMASK 0xFFFFFF

 #define MPIDR_LEVEL0_MASK 0x3
 #define MPIDR_LEVEL0_SHIFT 0
@@ -160,6 +279,8 @@ void store_cpu_topology(unsigned int cpuid)

 	update_siblings_masks(cpuid);

+	update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);
+
 	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
 		cpuid, cpu_topology[cpuid].thread_id,
 		cpu_topology[cpuid].core_id,
@@ -187,4 +308,6 @@ void init_cpu_topology(void)
 		set_power_scale(cpu, SCHED_POWER_SCALE);
 	}
 	smp_wmb();
+
+	parse_dt_topology();
 }
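To see the scaling at work, here is the patch's arithmetic rerun in user space with a hypothetical 1 GHz clock-frequency for both cores of an A15/A7 pair (a sketch; the efficiency values are the ones from table_efficiency[] above):

#include <stdio.h>

int main(void)
{
	/* capacity = (clock-frequency >> 20) * efficiency, as in
	 * parse_dt_topology() */
	unsigned long rate = 1000000000UL;		/* hypothetical 1 GHz */
	unsigned long a15 = (rate >> 20) * 3891;	/* Cortex-A15: 3708123 */
	unsigned long a7  = (rate >> 20) * 2048;	/* Cortex-A7:  1951744 */

	/* middle_capacity = (min + max) >> 11 puts the 'average' core
	 * close to SCHED_POWER_SCALE (1024) after the division below */
	unsigned long middle = (a7 + a15) >> 11;	/* 2763 */

	printf("A15 cpu_power %lu, A7 cpu_power %lu\n",
	       a15 / middle, a7 / middle);		/* 1342 and 706 */
	return 0;
}

Both values bracket SCHED_POWER_SCALE, so the scheduler sees the A15 as roughly 1.9 times as capable as the A7, matching the 3891/2048 efficiency ratio.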
On Wed, 20 Jun 2012 17:19:20 +0200, Vincent Guittot wrote:
> Use the cpu compatible field and clock-frequency field of the DT to
> estimate the capacity of each core of the system and to update the
> cpu_power field accordingly. This patch makes it possible to put more
> running tasks on big cores than on LITTLE ones, but it doesn't ensure
> that long-running tasks will run on big cores and short ones on LITTLE
> cores.
>
> Signed-off-by: Vincent Guittot <vincent.guittot-QSEj5FYQhm4dnm+yROfE0A@public.gmane.org>
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Thanks.
On Wed, 2012-06-20 at 17:19 +0200, Vincent Guittot wrote:
> +#ifdef CONFIG_OF
> +struct cpu_efficiency {
> +	const char *compatible;
> +	unsigned long efficiency;
> +};
> +
> +/*
> + * Table of relative efficiency of each processors
> + * The efficiency value must fit in 20bit. The final
> + * cpu_scale value must be in the range
> + * 0 < cpu_scale < 2*SCHED_POWER_SCALE.
This wants a why.. I suspect it's to do with keeping capacity at 1.
> + * Processors that are not defined in the table,
> + * use the default SCHED_POWER_SCALE value for cpu_scale.
> + */
> +struct cpu_efficiency table_efficiency[] = {
> +	{"arm,cortex-a15", 3891},
> +	{"arm,cortex-a7",  2048},
> +	{NULL, },
> +};
> +
> +struct cpu_capacity {
> +	unsigned long hwid;
> +	unsigned long capacity;
> +};
> +
> +struct cpu_capacity *cpu_capacity;
> +
> +unsigned long middle_capacity = 1;
It would be very nice to not have to learn to read device-tree nonsense to work on the scheduler; how about something like this?
/*
 * Iterate all cpus and set the efficiency (as per table_efficiency)
 * also calculate the middle efficiency:
 *   (max{eff_i} - min{eff_i}) / 2
 * This is later used to scale the cpu_power field such that an
 * 'average' cpu is of middle power. Also see the comments near
 * table_efficiency[] and update_cpu_power().
 */
> +static void __init parse_dt_topology(void)
> +{

[...]

> +void update_cpu_power(unsigned int cpu, unsigned long hwid)
> +{
> +	unsigned int idx = 0;
> +
> +	/* look for the cpu's hwid in the cpu capacity table */
This smells like an O(n^2) loop.. ARM has only small cpu counts so this isn't an immediate issue, would still be nice to make a note of it though.
> +	for (idx = 0; idx < num_possible_cpus(); idx++) {
> +		if (cpu_capacity[idx].hwid == hwid)
> +			break;
> +
> +		if (cpu_capacity[idx].hwid == -1)
> +			return;
> +	}
> +
> +	if (idx == num_possible_cpus())
> +		return;
> +
> +	set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);
OK, but there's no guarantee here you'll stay within that [1, 2*SCHED_POWER_SCALE-1] range. This might want a comment and/or runtime verification so that when people extend the table_efficiency[] wrongly we'll get notice, humm?
On 2 July 2012 11:11, Peter Zijlstra <a.p.zijlstra@chello.nl> wrote:
> On Wed, 2012-06-20 at 17:19 +0200, Vincent Guittot wrote:
>> + * The efficiency value must fit in 20bit. The final
>> + * cpu_scale value must be in the range
>> + * 0 < cpu_scale < 2*SCHED_POWER_SCALE.
>
> This wants a why.. I suspect it's to do with keeping capacity at 1.
Yes, that's it. Now, regarding the DIV_ROUND_CLOSEST that is used in the scheduler to compute the capacity, it should rather stay in the range 0 < cpu_scale < 3*SCHED_POWER_SCALE/2.
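To pin down where that boundary sits: a power value keeps rounding to a capacity of 1 as long as it stays below 3*SCHED_POWER_SCALE/2. A standalone check — a sketch, assuming the scheduler derives the capacity with DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE):

#include <stdio.h>

#define SCHED_POWER_SCALE 1024UL
/* same rounding as the kernel macro for positive operands */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	/* 1535 = 3*SCHED_POWER_SCALE/2 - 1 still rounds to a capacity of 1 */
	printf("%lu\n", DIV_ROUND_CLOSEST(1535UL, SCHED_POWER_SCALE)); /* 1 */
	printf("%lu\n", DIV_ROUND_CLOSEST(1536UL, SCHED_POWER_SCALE)); /* 2 */
	return 0;
}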
>> + * Processors that are not defined in the table,
>> + * use the default SCHED_POWER_SCALE value for cpu_scale.
>> + */

[...]

> It would be very nice to not have to learn to read device-tree nonsense
> to work on the scheduler; how about something like this?
>
> /*
>  * Iterate all cpus and set the efficiency (as per table_efficiency)
>  * also calculate the middle efficiency:
>  *   (max{eff_i} - min{eff_i}) / 2
>  * This is later used to scale the cpu_power field such that an
>  * 'average' cpu is of middle power. Also see the comments near
>  * table_efficiency[] and update_cpu_power().
>  */
ok
[...]

>> +	/* look for the cpu's hwid in the cpu capacity table */
>
> This smells like an O(n^2) loop.. ARM has only small cpu counts so this
> isn't an immediate issue, would still be nice to make a note of it
> though.
Yes, this function is called for each cpu. I will add a comment about that, and also about the fact that the complete sequence is done only once. I will also add an optimization for systems with identical CPUs and DT information.
>> +	for (idx = 0; idx < num_possible_cpus(); idx++) {
>> +		if (cpu_capacity[idx].hwid == hwid)
>> +			break;

[...]

>> +	set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);
>
> OK, but there's no guarantee here you'll stay within that
> [1, 2*SCHED_POWER_SCALE-1] range. This might want a comment and/or
> runtime verification so that when people extend the table_efficiency[]
> wrongly we'll get notice, humm?
I will add more comments, but we can't be higher than 2047. The max value for a cpu_power will be: max / ((min + max) / 2^11), which is equal to max / (min + max) * 2^11, so the cpu_scale is smaller than 2^11 as min is never equal to 0.
We can get a cpu_power of 0 if another CPU in the system has more than 2048 times its capacity, but I'm not sure that's a realistic use case.
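To make both bounds concrete — a standalone sketch with hypothetical min/max capacities; the second pair uses a deliberately absurd 4096:1 spread:

#include <stdio.h>

static void scale(unsigned long min, unsigned long max)
{
	unsigned long middle = (min + max) >> 11;

	printf("max cpu_power %lu, min cpu_power %lu\n",
	       max / middle, min / middle);
}

int main(void)
{
	/* realistic big.LITTLE spread: both values stay inside (0, 2048) */
	scale(1951744UL, 3708123UL);		/* 1342 and 706 */
	/* extreme 4096:1 spread: max stays below 2048, min collapses to 0 */
	scale(1000000UL, 4096000000UL);		/* 2047 and 0 */
	return 0;
}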
On Mon, 2012-07-02 at 14:11 +0200, Vincent Guittot wrote:
> max / (min + max) * 2^11 so the cpu_scale is smaller than 2^11 as min
> is never equal to 0
D'0h right.. I think I confused the middle thing with avg again.
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
The x86 sched power implementation has been broken forever and gets in the way of other stuff, remove it.
For archaeological interest, fixing this code would require dealing with the cross-cpu calling of these functions and more importantly, we need to filter idle time out of the a/m-perf stuff because the ratio will go down to 0 when idle, giving a 0 capacity which is not what we'd want.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-wjjwelpti8f8k7i1pdnzmdr8@git.kernel.org
---
 arch/x86/kernel/cpu/Makefile |  2 +-
 arch/x86/kernel/cpu/sched.c  | 55 ------------------------------------
 2 files changed, 1 insertion(+), 56 deletions(-)
 delete mode 100644 arch/x86/kernel/cpu/sched.c

diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 6ab6aa2..c598126 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,7 +14,7 @@ CFLAGS_common.o := $(nostackp)

 obj-y			:= intel_cacheinfo.o scattered.o topology.o
 obj-y			+= proc.o capflags.o powerflags.o common.o
-obj-y			+= vmware.o hypervisor.o sched.o mshyperv.o
+obj-y			+= vmware.o hypervisor.o mshyperv.o
 obj-y			+= rdrand.o
 obj-y			+= match.o

diff --git a/arch/x86/kernel/cpu/sched.c b/arch/x86/kernel/cpu/sched.c
deleted file mode 100644
index a640ae5..0000000
--- a/arch/x86/kernel/cpu/sched.c
+++ /dev/null
@@ -1,55 +0,0 @@
-#include <linux/sched.h>
-#include <linux/math64.h>
-#include <linux/percpu.h>
-#include <linux/irqflags.h>
-
-#include <asm/cpufeature.h>
-#include <asm/processor.h>
-
-#ifdef CONFIG_SMP
-
-static DEFINE_PER_CPU(struct aperfmperf, old_perf_sched);
-
-static unsigned long scale_aperfmperf(void)
-{
-	struct aperfmperf val, *old = &__get_cpu_var(old_perf_sched);
-	unsigned long ratio, flags;
-
-	local_irq_save(flags);
-	get_aperfmperf(&val);
-	local_irq_restore(flags);
-
-	ratio = calc_aperfmperf_ratio(old, &val);
-	*old = val;
-
-	return ratio;
-}
-
-unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
-{
-	/*
-	 * do aperf/mperf on the cpu level because it includes things
-	 * like turbo mode, which are relevant to full cores.
-	 */
-	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
-		return scale_aperfmperf();
-
-	/*
-	 * maybe have something cpufreq here
-	 */
-
-	return default_scale_freq_power(sd, cpu);
-}
-
-unsigned long arch_scale_smt_power(struct sched_domain *sd, int cpu)
-{
-	/*
-	 * aperf/mperf already includes the smt gain
-	 */
-	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
-		return SCHED_LOAD_SCALE;
-
-	return default_scale_smt_power(sd, cpu);
-}
-
-#endif
Heterogeneous ARM platforms use the arch_scale_freq_power function to reflect the relative capacity of each core.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 kernel/sched/features.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index de00a48..d98ae90 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -42,7 +42,7 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true)
 /*
  * Use arch dependent cpu power functions
  */
-SCHED_FEAT(ARCH_POWER, false)
+SCHED_FEAT(ARCH_POWER, true)

 SCHED_FEAT(HRTICK, false)
 SCHED_FEAT(DOUBLE_TICK, false)
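For context on what this flag changes: when refreshing a cpu's power, the scheduler picks between the arch hook and the generic default based on ARCH_POWER. A simplified standalone model of that gating — a sketch; the real logic lives in update_cpu_power() in kernel/sched/fair.c and the shape here only approximates it:

#include <stdio.h>
#include <stdbool.h>

#define SCHED_POWER_SHIFT 10
#define SCHED_POWER_SCALE (1UL << SCHED_POWER_SHIFT)

static bool arch_power = true;		/* SCHED_FEAT(ARCH_POWER, true) */

/* arch override: ARM now returns per_cpu(cpu_scale, cpu) here */
static unsigned long arch_scale_freq_power(int cpu)
{
	return cpu == 0 ? 1342UL : 706UL;	/* hypothetical big/LITTLE pair */
}

/* generic fallback: always full capacity */
static unsigned long default_scale_freq_power(int cpu)
{
	return SCHED_POWER_SCALE;
}

static unsigned long compute_power(int cpu)
{
	unsigned long power = SCHED_POWER_SCALE;

	if (arch_power)
		power *= arch_scale_freq_power(cpu);
	else
		power *= default_scale_freq_power(cpu);

	return power >> SCHED_POWER_SHIFT;
}

int main(void)
{
	printf("cpu0 %lu, cpu1 %lu\n", compute_power(0), compute_power(1));
	return 0;
}

With the feature disabled, every cpu reports SCHED_POWER_SCALE regardless of what the arch provides, which is why heterogeneous ARM platforms need ARCH_POWER enabled.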