On Fri, Feb 27, 2015 at 04:54:07PM +0100, Vincent Guittot wrote:
> 	unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
>
> 		sa->running_avg_sum += delta_w * scale_freq
> 			>> SCHED_CAPACITY_SHIFT;
>
> so the only thing that could be improved is somehow making this
> multiplication go away when the arch doesn't implement the function.
>
> But I'm not sure how to do that without #ifdef.
Maybe a little something like so then... that should make the compiler get rid of those multiplications unless the arch needs them.
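To see why the generic case should cost nothing: with the default below,
scale_freq is the compile-time constant SCHED_CAPACITY_SCALE ==
1 << SCHED_CAPACITY_SHIFT (the shift is 10 in the kernel), so the multiply
is a shift left by 10 that the following shift right by 10 undoes; and
since delta_w is a u32 no high bits can be lost, so the compiler should
fold the whole thing into a plain add. A toy userspace rendition, not
kernel code, function name made up for illustration:

	#include <stdint.h>

	#define SCHED_CAPACITY_SHIFT	10
	#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

	/* stand-in for the generic arch_scale_freq_capacity() */
	static inline unsigned long scale_freq_capacity(void)
	{
		return SCHED_CAPACITY_SCALE;
	}

	uint64_t accumulate(uint64_t sum, uint32_t delta_w)
	{
		unsigned long scale_freq = scale_freq_capacity();

		/*
		 * (delta_w << 10) >> 10 == delta_w since delta_w < 2^32,
		 * so gcc -O2 should emit just an add, no mul and no shifts.
		 */
		return sum + ((uint64_t)delta_w * scale_freq
				>> SCHED_CAPACITY_SHIFT);
	}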
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2484,8 +2484,6 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
-unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
-
 /*
  * We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series. To do this we sub-divide our runnable
@@ -6015,11 +6013,6 @@ static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
 	return SCHED_CAPACITY_SCALE;
 }
 
-unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
-{
-	return default_scale_capacity(sd, cpu);
-}
-
 static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
 	if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1387,7 +1387,14 @@ static inline int hrtick_enabled(struct rq *rq)
 
 #ifdef CONFIG_SMP
 extern void sched_avg_update(struct rq *rq);
-extern unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
+
+#ifndef arch_scale_freq_capacity
+static __always_inline
+unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+#endif
 
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
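For completeness: an arch that does scale frequency would then take the
multiplication back by defining the macro before kernel/sched/sched.h sees
it, something like the hypothetical sketch below (function name and header
placement made up, e.g. its asm/topology.h):

	/* hypothetical arch hook -- illustrative only */
	unsigned long my_arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
	#define arch_scale_freq_capacity my_arch_scale_freq_capacity

The #ifndef guard above then skips the constant-returning default, and only
such archs pay for the multiply.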