Port the walt_prepare_migrate() and walt_finish_migrate() functions so that they can be used separately before and after task migration. As a result, we no longer need to acquire the locks of both the source and destination rq at the same time.
This patch directly ports these two functions from Vikram's latest WALT patches to the AOSP Android common kernel 4.4.
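Illustration only (not part of this patch): with the split API, a caller can account a migration while holding only one rq lock at a time, roughly as sketched below. example_walt_migrate() and its surrounding locking context are assumptions made for the sketch; real callers would live in the migration paths that already hold the respective rq lock.

	/*
	 * Sketch of a hypothetical caller: the source and destination rq
	 * locks are taken one after the other, never both at once (unlike
	 * walt_fixup_busy_time(), which needs both runqueues locked).
	 */
	static void example_walt_migrate(struct task_struct *p, int dest_cpu)
	{
		struct rq *src_rq = task_rq(p);
		struct rq *dest_rq = cpu_rq(dest_cpu);
		unsigned long flags;

		raw_spin_lock_irqsave(&src_rq->lock, flags);
		/* subtract p's curr/prev window sums from the source rq */
		walt_prepare_migrate(p, src_rq, true);
		raw_spin_unlock_irqrestore(&src_rq->lock, flags);

		/* ... p is detached from src_rq and moved to dest_cpu ... */

		raw_spin_lock_irqsave(&dest_rq->lock, flags);
		/* add p's curr/prev window sums to the destination rq */
		walt_finish_migrate(p, dest_rq, true);
		raw_spin_unlock_irqrestore(&dest_rq->lock, flags);
	}

Passing locked == false instead lets the helpers take and release the rq lock themselves via raw_spin_lock_irqsave(), as the functions below do.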
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
Signed-off-by: Leo Yan <leo.yan@linaro.org>
---
 kernel/sched/walt.c | 90 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/walt.h |  4 +++
 2 files changed, 94 insertions(+)
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index d9d0991..b2a22b3 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -820,6 +820,96 @@ void walt_migrate_sync_cpu(int cpu)
 		sync_cpu = smp_processor_id();
 }
 
+void walt_finish_migrate(struct task_struct *p, struct rq *dest_rq, bool locked)
+{
+	u64 wallclock;
+	unsigned long flags;
+
+	if (!p->on_rq && p->state != TASK_WAKING)
+		return;
+
+	if (locked == false)
+		raw_spin_lock_irqsave(&dest_rq->lock, flags);
+
+	lockdep_assert_held(&dest_rq->lock);
+
+	wallclock = walt_ktime_clock();
+
+	/* Update counters on destination CPU */
+	walt_update_task_ravg(dest_rq->curr, dest_rq,
+			      TASK_UPDATE, wallclock, 0);
+
+	/* We may be in a new window. Update task counters */
+	walt_update_task_ravg(p, dest_rq, TASK_MIGRATE, wallclock, 0);
+
+	if (p->ravg.curr_window) {
+		if (!dest_rq->window_start) {
+			p->ravg.curr_window = 0;
+			p->ravg.mark_start = 0;
+		}
+		dest_rq->curr_runnable_sum += p->ravg.curr_window;
+	}
+
+	if (p->ravg.prev_window) {
+		if (!dest_rq->window_start)
+			p->ravg.prev_window = 0;
+		dest_rq->prev_runnable_sum += p->ravg.prev_window;
+	}
+
+	if (locked == false)
+		raw_spin_unlock_irqrestore(&dest_rq->lock, flags);
+
+	trace_walt_migration_update_sum(dest_rq, p);
+}
+
+void walt_prepare_migrate(struct task_struct *p, struct rq *src_rq, bool locked)
+{
+	u64 wallclock;
+	unsigned long flags;
+
+	if (!p->on_rq && p->state != TASK_WAKING)
+		return;
+
+	if (exiting_task(p))
+		return;
+
+	if (locked == false)
+		raw_spin_lock_irqsave(&src_rq->lock, flags);
+
+	lockdep_assert_held(&src_rq->lock);
+
+	/* Note that same wallclock reference is used for all 3 events below */
+	wallclock = walt_ktime_clock();
+
+	/* Update counters on source CPU */
+	walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
+			      TASK_UPDATE, wallclock, 0);
+
+	/* Update task's counters */
+	walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);
+
+	/* Fixup busy time */
+	if (p->ravg.curr_window)
+		src_rq->curr_runnable_sum -= p->ravg.curr_window;
+
+	if (p->ravg.prev_window)
+		src_rq->prev_runnable_sum -= p->ravg.prev_window;
+
+	if ((s64)src_rq->prev_runnable_sum < 0) {
+		src_rq->prev_runnable_sum = 0;
+		WARN_ON(1);
+	}
+	if ((s64)src_rq->curr_runnable_sum < 0) {
+		src_rq->curr_runnable_sum = 0;
+		WARN_ON(1);
+	}
+
+	if (locked == false)
+		raw_spin_unlock_irqrestore(&src_rq->lock, flags);
+
+	trace_walt_migration_update_sum(src_rq, p);
+}
+
 void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
 {
 	struct rq *src_rq = task_rq(p);
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index e181c87..91bc3cc 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -24,6 +24,8 @@ void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
 					  struct task_struct *p);
 void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
 					  struct task_struct *p);
+void walt_prepare_migrate(struct task_struct *p, struct rq *rq, bool locked);
+void walt_finish_migrate(struct task_struct *p, struct rq *rq, bool locked);
 void walt_fixup_busy_time(struct task_struct *p, int new_cpu);
 void walt_init_new_task_load(struct task_struct *p);
 void walt_mark_task_starting(struct task_struct *p);
@@ -47,6 +49,8 @@ static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
 					  struct task_struct *p) { }
 static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
 					  struct task_struct *p) { }
+static inline void walt_prepare_migrate(struct task_struct *p, struct rq *rq, bool locked) { }
+static inline void walt_finish_migrate(struct task_struct *p, struct rq *rq, bool locked) { }
 static inline void walt_fixup_busy_time(struct task_struct *p, int new_cpu) { }
 static inline void walt_init_new_task_load(struct task_struct *p) { }
 static inline void walt_mark_task_starting(struct task_struct *p) { }
-- 
1.9.1