From cbe76570608d21b0e8313d5164e064594525b460 Mon Sep 17 00:00:00 2001
From: Morten Rasmussen <morten.rasmussen@arm.com>
Date: Mon, 17 Jun 2013 15:48:18 +0100
Subject: [PATCH] Revert "sched: SCHED_HMP multi-domain task migration
 control"

This reverts commit bfda2b2d521005363dc6b03f100fd0dbe18ce972.

The reverted commit added per-entity migration timestamps
(hmp_last_up_migration/hmp_last_down_migration) together with the
hmp_next_up_threshold/hmp_next_down_threshold settle delays used to
rate-limit successive HMP up and down migrations.

Conflicts:

	kernel/sched/fair.c
---
 include/linux/sched.h |    4 ----
 kernel/sched/core.c   |    4 ----
 kernel/sched/fair.c   |   40 ----------------------------------------
 3 files changed, 48 deletions(-)
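
Note for reviewers (this sits below the "---" marker, so git-am ignores
it): the checks removed from hmp_up_migration()/hmp_down_migration()
rate-limited migrations by taking the ns-resolution task clock delta
since the last migration, shifting it right by 10 (~1 us per unit,
1024 ~= 1 ms), and comparing it against thresholds that defaulted to
4096 (~4 ms). A minimal standalone illustration of that arithmetic
follows; it is a userspace sketch built from the hunks below, not
kernel code, and the 3 ms delta is an invented example value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/*
	 * The removed check compared (now - last) >> 10 against a
	 * threshold, where both timestamps come from
	 * cfs_rq_clock_task() in nanoseconds.  Shifting ns right by
	 * 10 divides by 1024, so one unit is ~1 us and 1024 units
	 * are ~1 ms, matching the "(1024 ~= 1 ms)" comment removed
	 * by this patch.
	 */
	uint64_t hmp_next_up_threshold = 4096;	/* removed default, ~4 ms */
	uint64_t delta_ns = 3000000;		/* example: 3 ms since last */

	if ((delta_ns >> 10) < hmp_next_up_threshold)
		printf("up migration suppressed: %llu units < %llu\n",
		       (unsigned long long)(delta_ns >> 10),
		       (unsigned long long)hmp_next_up_threshold);
	else
		printf("up migration allowed\n");

	return 0;
}

With these values the delta works out to 2929 units, which is below
the 4096-unit threshold, so the (now removed) check would have
suppressed the migration until roughly 4 ms had elapsed.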

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 351294b..f8dc676 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -939,10 +939,6 @@ struct sched_avg {
 	s64 decay_count;
 	unsigned long load_avg_contrib;
 	unsigned long load_avg_ratio;
-#ifdef CONFIG_SCHED_HMP
-	u64 hmp_last_up_migration;
-	u64 hmp_last_down_migration;
-#endif
 	u32 usage_avg_sum;
 };
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c43cd98..3ca9b4e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1604,10 +1604,6 @@ static void __sched_fork(struct task_struct *p)
 #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
 	p->se.avg.runnable_avg_period = 0;
 	p->se.avg.runnable_avg_sum = 0;
-#ifdef CONFIG_SCHED_HMP
-	p->se.avg.hmp_last_up_migration = 0;
-	p->se.avg.hmp_last_down_migration = 0;
-#endif
 #endif
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 90f61d8..03c4262 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3585,16 +3585,12 @@ static void hmp_offline_cpu(int cpu)
  * tweaking suit particular needs.
  *
  * hmp_up_prio: Only up migrate task with high priority (<hmp_up_prio)
- * hmp_next_up_threshold: Delay before next up migration (1024 ~= 1 ms)
- * hmp_next_down_threshold: Delay before next down migration (1024 ~= 1 ms)
  */
 unsigned int hmp_up_threshold = 512;
 unsigned int hmp_down_threshold = 256;
 #ifdef CONFIG_SCHED_HMP_PRIO_FILTER
 unsigned int hmp_up_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
 #endif
-unsigned int hmp_next_up_threshold = 4096;
-unsigned int hmp_next_down_threshold = 4096;
 
 static unsigned int hmp_up_migration(int cpu, struct sched_entity *se);
 static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
@@ -3657,22 +3653,6 @@ static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
 				tsk_cpus_allowed(tsk));
 }
 
-static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
-{
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
-
-	se->avg.hmp_last_up_migration = cfs_rq_clock_task(cfs_rq);
-	se->avg.hmp_last_down_migration = 0;
-}
-
-static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
-{
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
-
-	se->avg.hmp_last_down_migration = cfs_rq_clock_task(cfs_rq);
-	se->avg.hmp_last_up_migration = 0;
-}
-
 #ifdef CONFIG_HMP_VARIABLE_SCALE
 /*
  * Heterogenous multiprocessor (HMP) optimizations
@@ -4013,13 +3993,11 @@ unlock:
 #ifdef CONFIG_SCHED_HMP
 	if (hmp_up_migration(prev_cpu, &p->se)) {
 		new_cpu = hmp_select_faster_cpu(p, prev_cpu);
-		hmp_next_up_delay(&p->se, new_cpu);
 		trace_sched_hmp_migrate(p, new_cpu, 0);
 		return new_cpu;
 	}
 	if (hmp_down_migration(prev_cpu, &p->se)) {
 		new_cpu = hmp_select_slower_cpu(p, prev_cpu);
-		hmp_next_down_delay(&p->se, new_cpu);
 		trace_sched_hmp_migrate(p, new_cpu, 0);
 		return new_cpu;
 	}
@@ -6302,8 +6280,6 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
 static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
 {
 	struct task_struct *p = task_of(se);
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
-	u64 now;
 
 	if (hmp_cpu_is_fastest(cpu))
 		return 0;
@@ -6314,12 +6290,6 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
 		return 0;
 #endif
 
-	/* Let the task load settle before doing another up migration */
-	now = cfs_rq_clock_task(cfs_rq);
-	if (((now - se->avg.hmp_last_up_migration) >> 10)
-					< hmp_next_up_threshold)
-		return 0;
-
 	if (se->avg.load_avg_ratio > hmp_up_threshold) {
 		/* Target domain load < ~94% */
 		if (hmp_domain_min_load(hmp_faster_domain(cpu), NULL)
@@ -6336,8 +6306,6 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
 static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
 {
 	struct task_struct *p = task_of(se);
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
-	u64 now;
 
 	if (hmp_cpu_is_slowest(cpu))
 		return 0;
@@ -6351,12 +6319,6 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
 	}
 #endif
 
-	/* Let the task load settle before doing another down migration */
-	now = cfs_rq_clock_task(cfs_rq);
-	if (((now - se->avg.hmp_last_down_migration) >> 10)
-					< hmp_next_down_threshold)
-		return 0;
-
 	if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus,
 					tsk_cpus_allowed(p))
 		&& se->avg.load_avg_ratio < hmp_down_threshold) {
@@ -6556,7 +6518,6 @@ static void hmp_force_up_migration(int this_cpu)
 				target->migrate_task = p;
 				force = 1;
 				trace_sched_hmp_migrate(p, target->push_cpu, 1);
-				hmp_next_up_delay(&p->se, target->push_cpu);
 			}
 		}
 		if (!force && !target->active_balance) {
@@ -6571,7 +6532,6 @@ static void hmp_force_up_migration(int this_cpu)
 				target->migrate_task = p;
 				force = 1;
 				trace_sched_hmp_migrate(p, target->push_cpu, 2);
-				hmp_next_down_delay(&p->se, target->push_cpu);
 			}
 		}
 		raw_spin_unlock_irqrestore(&target->lock, flags);
-- 
1.7.9.5

