Workqueues queue work on the current CPU if the caller hasn't passed a preferred CPU. This may wake up an idle CPU, which is not actually required: the work can be processed by any CPU, so a non-idle CPU should be selected instead.

This patch adds support in the workqueue framework to get the preferred CPU from the scheduler, instead of using the current CPU.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 arch/arm/Kconfig   | 11 +++++++++++
 kernel/workqueue.c | 25 ++++++++++++++++++-------
 2 files changed, 29 insertions(+), 7 deletions(-)
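For illustration only (not part of this patch), a minimal module along the lines of the sketch below exercises the affected path; the module and function names are hypothetical. A schedule_work()/queue_work() call without an explicit CPU is exactly the case that, with CONFIG_MIGRATE_WQ=y, is routed through wq_select_cpu() instead of the local CPU:

#include <linux/module.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Hypothetical test module: queues work without naming a CPU. */
static void example_work_fn(struct work_struct *work)
{
	/*
	 * With CONFIG_MIGRATE_WQ this may run on a non-idle CPU chosen by
	 * the scheduler rather than on the CPU that queued it.
	 */
	pr_info("example work ran on cpu %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(example_work, example_work_fn);

static int __init example_init(void)
{
	schedule_work(&example_work);	/* no preferred CPU passed */
	return 0;
}

static void __exit example_exit(void)
{
	flush_work(&example_work);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Callers like this need no change; only the CPU selection inside the workqueue core differs.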
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5944511..da17bd0 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1594,6 +1594,17 @@ config HMP_SLOW_CPU_MASK
 	  Specify the cpuids of the slow CPUs in the system as a list string,
 	  e.g. cpuid 0+1 should be specified as 0-1.
+config MIGRATE_WQ
+	bool "(EXPERIMENTAL) Migrate Workqueues to non-idle cpu"
+	depends on SMP && EXPERIMENTAL
+	help
+	  Workqueues queue work on the current cpu, if the caller hasn't
+	  passed a preferred cpu. This may wake up an idle CPU, which is
+	  actually not required. This work can be processed by any CPU and
+	  so a non-idle CPU must be selected here. This option adds support
+	  in the workqueue framework to get preferred CPU details from the
+	  scheduler, instead of using the current CPU.
+
 config HAVE_ARM_SCU
 	bool
 	help
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 692a55b..fd8df4a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -456,6 +456,16 @@ static inline void debug_work_activate(struct work_struct *work) { }
 static inline void debug_work_deactivate(struct work_struct *work) { }
 #endif
 
+/* This enables migration of a work to a non-IDLE cpu instead of current cpu */
+#ifdef CONFIG_MIGRATE_WQ
+static int wq_select_cpu(void)
+{
+	return sched_select_cpu(SD_NUMA, -1);
+}
+#else
+#define wq_select_cpu() smp_processor_id()
+#endif
+
 /* Serializes the accesses to the list of workqueues. */
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);

@@ -995,7 +1005,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 		struct global_cwq *last_gcwq;
 		if (unlikely(cpu == WORK_CPU_UNBOUND))
-			cpu = raw_smp_processor_id();
+			cpu = wq_select_cpu();

 		/*
 		 * It's multi cpu.  If @wq is non-reentrant and @work
@@ -1066,8 +1076,9 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret;
-	ret = queue_work_on(get_cpu(), wq, work);
-	put_cpu();
+	preempt_disable();
+	ret = queue_work_on(wq_select_cpu(), wq, work);
+	preempt_enable();

 	return ret;
 }
@@ -1102,7 +1113,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 	struct delayed_work *dwork = (struct delayed_work *)__data;
 	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

-	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
+	__queue_work(wq_select_cpu(), cwq->wq, &dwork->work);
 }

 /**
@@ -1158,7 +1169,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 		if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
 			lcpu = gcwq->cpu;
 		else
-			lcpu = raw_smp_processor_id();
+			lcpu = wq_select_cpu();
 	} else
 		lcpu = WORK_CPU_UNBOUND;
@@ -2823,8 +2834,8 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
 static inline void __flush_delayed_work(struct delayed_work *dwork)
 {
 	if (del_timer_sync(&dwork->timer))
-		__queue_work(raw_smp_processor_id(),
-			     get_work_cwq(&dwork->work)->wq, &dwork->work);
+		__queue_work(wq_select_cpu(), get_work_cwq(&dwork->work)->wq,
+				&dwork->work);
 }

/**