@@ -4191,7 +4196,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	this_cpu  = smp_processor_id();
 	prev_cpu  = task_cpu(p);
 	load	  = source_load(prev_cpu);
-	this_load = target_load(this_cpu);
+	this_load = source_load(this_cpu);
It looks a bit odd that both the source and destination cpu loads are found using a function named source_load(). IMHO, it would be clearer if you got rid of source_load() and target_load() completely and just used weighted_cpuload() instead. You only use target_load() twice (further down) anyway.
Yes, weighted_cpuload() has a clearer meaning.
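As an aside, a minimal sketch of what that suggestion would look like for the load gathering in wake_affine(). This is illustrative only, not part of the patch, and it assumes weighted_cpuload() simply returns the rq's cfs.runnable_load_avg as noted in the summary at the end:

/*
 * Sketch only, not from the patch: wake_affine() load gathering if
 * source_load()/target_load() were dropped in favour of weighted_cpuload().
 */
static int wake_affine_sketch(struct task_struct *p)
{
	int this_cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	unsigned long load, this_load;

	load      = weighted_cpuload(prev_cpu);	/* load where p last ran */
	this_load = weighted_cpuload(this_cpu);	/* load on the waking cpu */

	/* ... rest of the wake_affine() heuristics unchanged ... */
	return this_load <= load;
}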
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
@@ -4247,7 +4252,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	if (balanced ||
 	    (this_load <= load &&
-	     this_load + target_load(prev_cpu) <= tl_per_task)) {
+	     this_load + source_load(prev_cpu) <= tl_per_task)) {
 		/*
 		 * This domain has SD_WAKE_AFFINE and
 		 * p is cache cold in this domain, and
@@ -4293,7 +4298,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 			if (local_group)
 				load = source_load(i);
 			else
-				load = target_load(i);
+				load = target_load(i, imbalance);
Here you could easily use weighted_cpuload() instead and apply the bias as before (below).
 			avg_load += load;
 		}
@@ -4309,7 +4314,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		}
 	} while (group = group->next, group != sd->groups);
 
-	if (!idlest || 100*this_load < imbalance*min_load)
+	if (!idlest || this_load < min_load)
This change would go away if you used weighted_cpuload().
Yes, but it seems better to keep the bias unified in target_load().
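The quoted hunks never show the helpers themselves, so the following is only a reconstruction of what source_load()/target_load() presumably look like once the bias lives there. If target_load() scales by bias/100, the plain this_load < min_load check above stays equivalent to the old 100*this_load < imbalance*min_load comparison:

/* Reconstructed sketch; the real patch may differ in details. */
static unsigned long source_load(int cpu)
{
	return weighted_cpuload(cpu);
}

/* Scale the load of a potential target cpu up by 'bias' percent. */
static unsigned long target_load(int cpu, int bias)
{
	return weighted_cpuload(cpu) * bias / 100;
}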
 		return NULL;
 	return idlest;
 }
@@ -5745,6 +5750,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 {
 	unsigned long load;
 	int i;
+	int bias = 100 + (env->sd->imbalance_pct - 100) / 2;
 
 	memset(sgs, 0, sizeof(*sgs));
@@ -5752,8 +5758,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		struct rq *rq = cpu_rq(i);
 
 		/* Bias balancing toward cpus of our domain */
-		if (local_group)
-			load = target_load(i);
+		if (local_group && env->idle != CPU_IDLE)
+			load = target_load(i, bias);
Could be weighted_cpuload() instead, but you would have to keep the lines you delete below.
With the current logic, target_load() behaves as before and applies the bias whether the index is busy or idle. I am afraid weighted_cpuload() is not a good fit here, and I prefer to keep the bias in one uniform place instead of spreading it over a larger scope.
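To put a number on that uniform bias: with the usual imbalance_pct of 125, the bias computed in the hunk above works out to 112, so a local-group cpu's load is inflated by about 12% before comparison, keeping the balancer conservative when the local cpus are not idle. A tiny stand-alone illustration follows; the 125 is the common sched_domain default and the scaling follows the target_load() sketch above, both assumptions rather than quotes from the patch:

#include <stdio.h>

int main(void)
{
	int imbalance_pct = 125;			/* assumed sched_domain default */
	int bias = 100 + (imbalance_pct - 100) / 2;	/* = 112 */
	unsigned long load = 1024;			/* example runnable load */

	printf("bias = %d%%, biased load = %lu\n", bias, load * bias / 100);
	return 0;
}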
 		else
 			load = source_load(i);
@@ -6193,14 +6199,6 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 		if ((local->idle_cpus < busiest->idle_cpus) &&
 		    busiest->sum_nr_running <= busiest->group_weight)
 			goto out_balanced;
-	} else {
-		/*
-		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
-		 * imbalance_pct to be conservative.
-		 */
-		if (100 * busiest->avg_load <=
-				env->sd->imbalance_pct * local->avg_load)
-			goto out_balanced;
 	}
 
 force_balance:
I think it is clearer now what this patch set does. It rips out cpu_load[] completely and changes all its users to use weighted_cpuload() (cfs.runnable_load_avg) instead. The longer-term view provided by the cpu_load[] indexes is not replaced. Whether that is a loss, I'm not sure.
Thanks! Fengguang's testing system is monitoring this branch, so no news is good news. :)
Morten
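For reference, the weighted_cpuload() that the summary above refers to is, in the fair.c of that period, essentially just the per-rq runnable load average; roughly:

/* Approximate definition from kernel/sched/fair.c of that era. */
static unsigned long weighted_cpuload(const int cpu)
{
	return cpu_rq(cpu)->cfs.runnable_load_avg;
}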