On Mon, Feb 17, 2014 at 01:55:10AM +0000, Alex Shi wrote:
> The old code already considers the bias in source/target_load, but it still
> uses imbalance_pct as a last check when picking the idlest/busiest group,
> which is redundant: if we bias the load in source/target_load, we had better
> not apply imbalance_pct again.
>
> Now that the cpu_load array has been removed, it is a good time to unify how
> the target bias is applied. So remove imbalance_pct from the last check and
> apply the bias directly where the target load is read.
> Signed-off-by: Alex Shi <alex.shi@linaro.org>
> ---
>  kernel/sched/fair.c | 34 +++++++++++++++++-----------------
>  1 file changed, 17 insertions(+), 17 deletions(-)
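Just to spell out my understanding of the intent with a toy (plain userspace C, numbers invented; local_load/idlest_load merely stand in for the group loads that find_idlest_group compares): the imbalance_pct factor moves from the comparison site into the target-side load, and the two forms agree up to integer rounding.

#include <stdio.h>

int main(void)
{
        unsigned long local_load = 1024, idlest_load = 900;    /* invented */
        int imbalance_pct = 125;

        /* Old: the bias is applied at the comparison site. */
        int stay_local_old = 100 * local_load < imbalance_pct * idlest_load;

        /* New: the migration-target side is pre-inflated, roughly what
         * target_load(cpu, imbalance_pct) now does. */
        unsigned long idlest_biased = idlest_load * imbalance_pct / 100;
        int stay_local_new = local_load < idlest_biased;

        printf("old=%d new=%d\n", stay_local_old, stay_local_new);
        return 0;
}

So once the bias is folded into target_load(), keeping the extra imbalance_pct factor at the comparison would indeed apply it twice.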
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index eeffe75..a85a10b 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -1016,7 +1016,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
>  static unsigned long weighted_cpuload(const int cpu);
>  static unsigned long source_load(int cpu);
> -static unsigned long target_load(int cpu);
> +static unsigned long target_load(int cpu, int imbalance_pct);
>  static unsigned long power_of(int cpu);
>  static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
> @@ -3967,7 +3967,7 @@ static unsigned long source_load(int cpu)
>   * Return a high guess at the load of a migration-target cpu weighted
>   * according to the scheduling class and "nice" value.
>   */
> -static unsigned long target_load(int cpu)
> +static unsigned long target_load(int cpu, int imbalance_pct)
>  {
>          struct rq *rq = cpu_rq(cpu);
>          unsigned long total = weighted_cpuload(cpu);
> @@ -3975,6 +3975,11 @@ static unsigned long target_load(int cpu)
>          if (!sched_feat(LB_BIAS))
>                  return total;
> 
> +        /*
> +         * Bias target load with imbalance_pct.
> +         */
> +        total = total * imbalance_pct / 100;
> +
>          return max(rq->cpu_load, total);
>  }
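As a sanity check on the arithmetic of the new helper (userspace mock below; loads invented, weighted_cpuload()/rq->cpu_load stubbed, the LB_BIAS early return omitted): with imbalance_pct = 125 the target cpu is made to look 25% busier than its raw weighted load, unless the runqueue's sampled load is already higher.

#include <stdio.h>

/* Stand-ins for weighted_cpuload(cpu) and rq->cpu_load; values invented. */
static unsigned long weighted_load = 1000;
static unsigned long runqueue_load = 1100;

/* Mirrors the patched target_load(): inflate the raw load by imbalance_pct,
 * then take the max against the runqueue's sampled load. */
static unsigned long mock_target_load(int imbalance_pct)
{
        unsigned long total = weighted_load * imbalance_pct / 100;

        return total > runqueue_load ? total : runqueue_load;
}

int main(void)
{
        printf("pct=100 -> %lu\n", mock_target_load(100));     /* 1100: rq load wins */
        printf("pct=125 -> %lu\n", mock_target_load(125));     /* 1250: biased load wins */
        return 0;
}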
> @@ -4180,6 +4185,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
>          struct task_group *tg;
>          unsigned long weight;
>          int balanced;
> +        int bias = 100 + (sd->imbalance_pct - 100) / 2;
> 
>          /*
>           * If we wake multiple tasks be careful to not bounce
> @@ -4191,7 +4197,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
>          this_cpu  = smp_processor_id();
>          prev_cpu  = task_cpu(p);
>          load      = source_load(prev_cpu);
> -        this_load = target_load(this_cpu);
> +        this_load = target_load(this_cpu, bias);
It seems that you now apply the bias to both sides of the comparison. The above should be:

+	this_load = target_load(this_cpu, 100);

to make sense.
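To put numbers on it (toy below; equal cpu power assumed, the effective_load() terms ignored, loads invented): once this_load already carries the bias via target_load(this_cpu, bias), the bias that prev_eff_load picks up further down (prev_eff_load = bias) largely cancels against it, so the balanced check behaves much as if there were no bias at all. With target_load(this_cpu, 100) the bias actually tips the decision.

#include <stdio.h>

int main(void)
{
        /* Invented loads; equal cpu power; effective_load() terms ignored. */
        unsigned long this_load = 1100, prev_load = 1000;
        int bias = 100 + (125 - 100) / 2;       /* 112, assuming imbalance_pct = 125 */

        /* As posted: this_load is already scaled by bias, and prev_eff_load is
         * scaled by bias as well, so the two factors mostly cancel. */
        unsigned long this_eff = 100 * (this_load * bias / 100);
        unsigned long prev_eff = (unsigned long)bias * prev_load;
        printf("as posted: balanced=%d\n", this_eff <= prev_eff);      /* 0, same as unbiased */

        /* Suggested: leave this side unbiased so only prev_eff_load carries the bias. */
        unsigned long this_eff2 = 100 * this_load;
        printf("suggested: balanced=%d\n", this_eff2 <= prev_eff);     /* 1 */
        return 0;
}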
> 
>          /*
>           * If sync wakeup then subtract the (maximum possible)
> @@ -4226,7 +4232,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
>          this_eff_load *= this_load + effective_load(tg, this_cpu, weight, weight);
> 
> -        prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
> +        prev_eff_load = bias;
>          prev_eff_load *= power_of(this_cpu);
>          prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
> @@ -4247,7 +4253,8 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
>          if (balanced ||
>              (this_load <= load &&
> -             this_load + target_load(prev_cpu) <= tl_per_task)) {
> +             this_load + target_load(prev_cpu, sd->imbalance_pct)
> +                        <= tl_per_task)) {
I think it should be target_load(prev_cpu, 100) here instead; passing 100 makes the new scaling a no-op, so the load stays what it was before the patch. IIUC, this is meant to be an unbiased comparison.