Add a basic filtering mechanism which rejects too frequent requests. On the other hand, create a bypass for RT or deadline bandwidth requests, which must always be served.
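As an illustration, a caller on the RT or deadline bandwidth path would pass the new flag to skip the rate limit. The call site below is a hypothetical sketch, not part of this patch:

	/*
	 * Hypothetical call site (illustration only): an RT bandwidth
	 * change requests an immediate weight update by passing
	 * SCHED_POWER_FORCE_UPDATE_RT, which makes
	 * should_update_next_weight() return early instead of applying
	 * the MINIMUM_UPDATE_TIME rate limit.
	 */
	static void rt_bandwidth_weight_changed(int cpu, unsigned long weight)
	{
		sched_power_change_cpu_weight(cpu, weight,
					      SCHED_POWER_FORCE_UPDATE_RT);
	}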
Signed-off-by: Lukasz Luba <l.luba@partner.samsung.com>
---
 kernel/sched/power.c | 24 +++++++++++++++---------
 kernel/sched/power.h |  3 ++-
 kernel/sched/sched.h |  3 ++-
 3 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/power.c b/kernel/sched/power.c
index 0dcb4579b474..28f3b6c8c0a3 100644
--- a/kernel/sched/power.c
+++ b/kernel/sched/power.c
@@ -13,6 +13,7 @@
 #define THERMAL_REQUEST_KFIFO_SIZE (64 * sizeof(struct power_request))
 #define DEFAULT_CPU_WEIGHT 1024
+#define MINIMUM_UPDATE_TIME 10000000 /* 10 ms */
 
 static DEFINE_PER_CPU(struct cpu_power, cpu_power);
 DEFINE_PER_CPU(struct update_sched_power *, update_cpu_power);
@@ -120,9 +121,15 @@ EXPORT_SYMBOL_GPL(sched_power_cpu_reinit_weight);
 //////////////////////////////////////////////////////////////
 
-static bool should_update_next_weight(int time)
+static bool should_update_next_weight(u64 time, int flags)
 {
-	return 1;
+	if (flags & SCHED_POWER_FORCE_UPDATE_RT)
+		return 1;
+
+	if (time >= sched_clock() + MINIMUM_UPDATE_TIME)
+		return 1;
+
+	return 0;
 }
 
 static void sched_power_work(struct kthread_work *work)
@@ -139,15 +146,13 @@ static void sched_power_work(struct kthread_work *work)
 		raw_spin_lock(&cpower->update_lock);
 		w = cpower->weight;
 		req = cpower->req;
-		cpower->req.time = 0;
+		cpower->req.time = sched_clock();
 		cpower->weight = req.weight;
 		raw_spin_unlock(&cpower->update_lock);
 
-		if (should_update_next_weight(req.time)) {
-			pr_info("cpower req poped\n");
-			thermal_cpu_cdev_set_weight(req.cpu, req.weight);
-			need_update = true;
-		}
+		pr_info("cpower req poped\n");
+		thermal_cpu_cdev_set_weight(req.cpu, req.weight);
+		need_update = true;
 	}
 
 	if (need_update)
@@ -176,7 +181,7 @@ static void sched_power_update(struct update_sched_power *update, int cpu,
 		return;
 
 	/* Filter to frequent changes or not needed*/
-	if (!should_update_next_weight(time))
+	if (!should_update_next_weight(time, flags))
 		return;
 
 	sp = cpower->sched_power;
@@ -185,6 +190,7 @@ static void sched_power_update(struct update_sched_power *update, int cpu,
 	cpower->req.weight = weight;
 	cpower->req.cpu = cpu;
 	cpower->req.time = time;
+	cpower->req.flags = flags;
 	raw_spin_unlock(&cpower->update_lock);
 
 	if (!sp->work_in_progress) {
diff --git a/kernel/sched/power.h b/kernel/sched/power.h
index f08277efd50d..1992e637d53f 100644
--- a/kernel/sched/power.h
+++ b/kernel/sched/power.h
@@ -32,7 +32,8 @@ struct sched_power {
 struct power_request {
 	unsigned int weight;
 	int cpu;
-	int time;
+	u64 time;
+	int flags;
 };
 
 struct cpu_power {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c1714ef73669..7c8dea6df31a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2245,6 +2245,7 @@ unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned
 }
 #endif
 
+#define SCHED_POWER_FORCE_UPDATE_RT 0x01
 #ifdef CONFIG_THERMAL
 struct update_sched_power {
 	void (*func)(struct update_sched_power *, int, unsigned int, int, int);
@@ -2255,7 +2256,7 @@ static inline void sched_power_change_cpu_weight(int cpu, unsigned long weight,
 						 int flags)
 {
 	struct update_sched_power *update;
-	int time = 0;
+	u64 time = sched_clock();
 
 	update = rcu_dereference_sched(*per_cpu_ptr(&update_cpu_power, cpu));