This reverts commit ad363efa74968250033ce2d9de00488d0cba9134.

Conflicts:
	kernel/sched/core.c
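
This removes the CONFIG_SCHED_HMP_SYSFS Kconfig option, the hmp_enbld
migration enable flag, and the "hmp_tuning" sysfs group the reverted
commit registered under the cpu subsystem kobject. For reference, a
sketch of how those tunables were driven from userspace; the
/sys/devices/system/cpu/hmp_tuning/ path is inferred from
cpu_subsys.dev_root and the group name in the removed code, and the
helper below is illustrative, not part of either kernel tree:

    /*
     * Illustrative userspace helper, not part of this patch: the sysfs
     * path is inferred from the "hmp_tuning" group removed below.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int hmp_write(const char *name, const char *val)
    {
            char path[128];
            int fd, ret = 0;

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/hmp_tuning/%s", name);
            fd = open(path, O_WRONLY);
            if (fd < 0)
                    return -1;
            if (write(fd, val, strlen(val)) < 0)
                    ret = -1;
            close(fd);
            return ret;
    }

    int main(void)
    {
            /* 0 disables HMP migration; the store also rebuilt sched domains */
            hmp_write("hmp_enbld", "0");
            /* up-migration load threshold; the removed code accepted 0-1023 */
            hmp_write("hmp_up_threshold", "700");
            return 0;
    }

Equivalent shell writes (echo 0 > .../hmp_enbld) were presumably the
usual tuning workflow.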
---
 arch/arm/Kconfig                |  9 -----
 arch/arm/include/asm/topology.h |  4 +-
 kernel/sched/core.c             |  4 --
 kernel/sched/fair.c             | 81 ---------------------------------------
 4 files changed, 2 insertions(+), 96 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c957bb1..f15c657 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1603,15 +1603,6 @@ config HMP_SLOW_CPU_MASK
 	  Specify the cpuids of the slow CPUs in the system as a list string,
 	  e.g. cpuid 0+1 should be specified as 0-1.
-config SCHED_HMP_SYSFS
-	bool "HMP Scheduling tuning options"
-	depends on SCHED_HMP
-	help
-	  Export sysfs tunables for experimental scheduler optimizations for
-	  SCHED HMP. Allows modification of migration thresholds and minimum
-	  priorities. These tuning parameters are intended for engineering
-	  tuning and are not enabled by default.
-
 config HAVE_ARM_SCU
 	bool
 	help
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index b76a898..983fa7c 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -42,8 +42,8 @@ int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
 	.newidle_idx		= 0,			\
 	.wake_idx		= 0,			\
 	.forkexec_idx		= 0,			\
-	.flags			= (!hmp_enbld)*SD_LOAD_BALANCE	\
-				| (!hmp_enbld)*SD_PREFER_SIBLING \
+							\
+	.flags			= 0*SD_LOAD_BALANCE	\
 				| 1*SD_BALANCE_NEWIDLE	\
 				| 1*SD_BALANCE_EXEC	\
 				| 1*SD_BALANCE_FORK	\
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9bada60..e94e274 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5839,10 +5839,6 @@ int __weak arch_sd_sibling_asym_packing(void)
 	return 0*SD_ASYM_PACKING;
 }
 
-#ifdef CONFIG_SCHED_HMP
-extern int hmp_enbld;
-#endif
-
 int __weak arch_sd_share_power_line(void)
 {
 	return 1*SD_SHARE_POWERLINE;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d758086..e24f2e3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -31,11 +31,6 @@
 #include "sched.h"
 
-#ifdef CONFIG_SCHED_HMP_SYSFS
-#include <linux/cpuset.h>
-#include <linux/sysfs.h>
-#endif
-
 /*
  * Targeted preemption latency for CPU-bound tasks:
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -3330,66 +3325,6 @@ static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
 	se->avg.hmp_last_down_migration = cfs_rq_clock_task(cfs_rq);
 	se->avg.hmp_last_up_migration = 0;
 }
-
-/* Tracks Task migration enabled or not */
-int hmp_enbld = 1;
-
-#ifdef CONFIG_SCHED_HMP_SYSFS
-struct hmp_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct kobject *kobj,
-			struct attribute *attr, char *buf);
-	ssize_t (*store)(struct kobject *a, struct attribute *b,
-			 const char *c, size_t count);
-};
-
-#define nops() ;
-
-#define SCHED_HMP_SYSFS_INFO(_name, _lower_limit, _upper_limit, _fn)	\
-static ssize_t sched_##_name##_show(struct kobject *kobj,		\
-				    struct attribute *attr, char *buf)	\
-{									\
-	return sprintf(buf, "%u\n", _name);				\
-}									\
-									\
-static ssize_t sched_##_name##_store(struct kobject *a,		\
-		struct attribute *b, const char *buf, size_t count)	\
-{									\
-	unsigned int level = 0;						\
-	if (sscanf(buf, "%u", &level) != 1)				\
-		return -EINVAL;						\
-	if (level < _lower_limit || level > _upper_limit)		\
-		return -EINVAL;						\
-	_name = level;							\
-	_fn();								\
-	return count;							\
-}									\
-									\
-static struct hmp_attr attr_##_name =					\
-__ATTR(_name, 0644, sched_##_name##_show, sched_##_name##_store)
-
-SCHED_HMP_SYSFS_INFO(hmp_enbld, 0, 1, rebuild_sched_domains);
-SCHED_HMP_SYSFS_INFO(hmp_up_threshold, 0, 1023, nops);
-SCHED_HMP_SYSFS_INFO(hmp_down_threshold, 0, 1023, nops);
-#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
-SCHED_HMP_SYSFS_INFO(hmp_up_prio, 100, 140, nops);
-#endif
-
-static struct attribute *hmp_attributes[] = {
-	&attr_hmp_enbld.attr,
-	&attr_hmp_up_threshold.attr,
-	&attr_hmp_down_threshold.attr,
-#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
-	&attr_hmp_up_prio.attr,
-#endif
-	NULL
-};
-
-static struct attribute_group hmp_attr_group = {
-	.attrs = hmp_attributes,
-	.name = "hmp_tuning",
-};
-#endif /* CONFIG_SCHED_HMP_SYSFS */
 #endif /* CONFIG_SCHED_HMP */
 
 static inline bool is_buddy_busy(int cpu)
@@ -3543,9 +3478,6 @@ unlock:
 	rcu_read_unlock();
 
 #ifdef CONFIG_SCHED_HMP
-	if (!hmp_enbld)
-		goto hmp_end;
-
 	if (hmp_up_migration(prev_cpu, &p->se)) {
 		new_cpu = hmp_select_faster_cpu(p, prev_cpu);
 		hmp_next_up_delay(&p->se, new_cpu);
@@ -3561,7 +3493,6 @@ unlock:
 	/* Make sure that the task stays in its previous hmp domain */
 	if (!cpumask_test_cpu(new_cpu, &hmp_cpu_domain(prev_cpu)->cpus))
 		return prev_cpu;
-hmp_end:
 #endif
 
 	return new_cpu;
@@ -6170,9 +6101,6 @@ static void hmp_force_up_migration(int this_cpu)
 	unsigned int force;
 	struct task_struct *p;
 
-	if (!hmp_enbld)
-		return;
-
 	if (!spin_trylock(&hmp_force_migration))
 		return;
 	for_each_online_cpu(cpu) {
@@ -6725,12 +6653,3 @@ __init void init_sched_fair_class(void)
 #endif /* SMP */
 
 }
-
-#ifdef CONFIG_SCHED_HMP_SYSFS
-static int __init hmp_sched(void)
-{
-	BUG_ON(sysfs_create_group(&cpu_subsys.dev_root->kobj, &hmp_attr_group));
-	return 0;
-}
-late_initcall(hmp_sched);
-#endif
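
Appendix, not part of the patch itself: the largest chunk removed above
is a hand-rolled sysfs attribute group. For anyone wanting to carry such
tunables out of tree, below is a minimal self-contained sketch of the
same pattern as a loadable module. It uses the stock kobj_attribute
helpers rather than the removed custom struct hmp_attr, and every demo_*
name plus the /sys/kernel/demo/ location is an assumption of this
sketch, not code from the original patch:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/kobject.h>
    #include <linux/module.h>
    #include <linux/sysfs.h>

    /* Illustrative stand-in for the removed hmp_up_threshold (0-1023). */
    static unsigned int demo_threshold = 512;

    static ssize_t demo_threshold_show(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
    {
            return sprintf(buf, "%u\n", demo_threshold);
    }

    static ssize_t demo_threshold_store(struct kobject *kobj,
                                        struct kobj_attribute *attr,
                                        const char *buf, size_t count)
    {
            unsigned int level;

            /* Reject non-numeric and out-of-range input, as the removed
             * SCHED_HMP_SYSFS_INFO() macro did. */
            if (kstrtouint(buf, 10, &level))
                    return -EINVAL;
            if (level > 1023)
                    return -EINVAL;
            demo_threshold = level;
            return count;
    }

    static struct kobj_attribute demo_threshold_attr =
            __ATTR(demo_threshold, 0644, demo_threshold_show,
                   demo_threshold_store);

    static struct attribute *demo_attrs[] = {
            &demo_threshold_attr.attr,
            NULL,
    };

    /* The reverted code named its group "hmp_tuning" and hung it off
     * cpu_subsys.dev_root->kobj; a sample module gets its own kobject. */
    static const struct attribute_group demo_attr_group = {
            .name  = "demo_tuning",
            .attrs = demo_attrs,
    };

    static struct kobject *demo_kobj;

    static int __init demo_init(void)
    {
            int ret;

            /* Creates /sys/kernel/demo/demo_tuning/demo_threshold. */
            demo_kobj = kobject_create_and_add("demo", kernel_kobj);
            if (!demo_kobj)
                    return -ENOMEM;
            ret = sysfs_create_group(demo_kobj, &demo_attr_group);
            if (ret)
                    kobject_put(demo_kobj);
            return ret;
    }

    static void __exit demo_exit(void)
    {
            sysfs_remove_group(demo_kobj, &demo_attr_group);
            kobject_put(demo_kobj);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

After insmod, the tunable behaves like the removed ones did: writing a
value in range (echo 700 > /sys/kernel/demo/demo_tuning/demo_threshold)
is accepted, anything else returns -EINVAL.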