To support C-states on arm64, add arch-specific functions and the FFH (Functional Fixed Hardware) related functions.
Signed-off-by: Jonghwan Choi <jhbird.choi@samsung.com> --- arch/arm64/include/asm/acpi.h | 12 +++++++ arch/arm64/include/asm/irqflags.h | 6 ++++ arch/arm64/include/asm/processor.h | 3 ++ arch/arm64/kernel/Makefile | 4 +++ arch/arm64/kernel/cstate.c | 67 ++++++++++++++++++++++++++++++++++++ arch/arm64/kernel/process.c | 6 ++++ drivers/acpi/processor_idle.c | 14 -------- 7 files changed, 98 insertions(+), 14 deletions(-) create mode 100644 arch/arm64/kernel/cstate.c
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index dff95da..34e591a 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -92,6 +92,14 @@ extern int arm_cpu_to_apicid[NR_CPUS]; extern int gic_acpi_init(void); extern int cpu_acpi_read_ops(int cpu);
+/* + * Check if the CPU can handle C2 and deeper + */ +static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) +{ + return max_cstate; +} + #else /* !CONFIG_ACPI */ #define acpi_disabled 1 /* ACPI sometimes enabled on ARM */ #define acpi_noirq 1 /* ACPI sometimes enabled on ARM */ @@ -110,4 +118,8 @@ static inline int cpu_acpi_read_ops(int cpu)
#endif
+#define ARCH_HAS_POWER_INIT 1 + +#define acpi_unlazy_tlb(x) + #endif /*_ASM_ARM64_ACPI_H*/ diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index 11cc941..8ab3bd7 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -19,6 +19,7 @@ #ifdef __KERNEL__
#include <asm/ptrace.h> +#include <asm/barrier.h>
/* * CPU interrupt mask handling. @@ -113,5 +114,10 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) #define local_dbg_enable() asm("msr daifclr, #8" : : : "memory") #define local_dbg_disable() asm("msr daifset, #8" : : : "memory")
+static inline void arch_safe_halt(void) +{ + arch_local_irq_enable(); + wfi(); +} #endif #endif diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 50ce951..dca0939 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -47,6 +47,9 @@ #define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK #endif /* __KERNEL__ */
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, + IDLE_POLL}; + struct debug_info { /* Have we suspended stepping by a debugger? */ int suspended_step; diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index db63a0a..b829268 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -26,6 +26,10 @@ arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o arm64-obj-$(CONFIG_KGDB) += kgdb.o arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
+ifneq ($(CONFIG_ACPI_PROCESSOR),) +arm64-obj-y += cstate.o +endif + obj-y += $(arm64-obj-y) vdso/ obj-m += $(arm64-obj-m) head-y := head.o diff --git a/arch/arm64/kernel/cstate.c b/arch/arm64/kernel/cstate.c new file mode 100644 index 0000000..00c22cd --- /dev/null +++ b/arch/arm64/kernel/cstate.c @@ -0,0 +1,67 @@ +/* + * Based on arch/x86/kernel/acpi/cstate.c + * + * Copyright (C) 2005 Intel Corporation + * Venkatesh Pallipadi venkatesh.pallipadi@intel.com + * - Added _PDC for SMP C-states on Intel CPUs + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/cpu.h> +#include <linux/cpu_pm.h> + +#include <acpi/processor.h> +#include <asm/acpi.h> +#include <asm/cpu.h> +#include <asm/suspend.h> + +/* + * Initialize bm_flags based on the CPU cache properties + * On SMP it depends on cache configuration + * - When cache is not shared among all CPUs, we flush cache + * before entering C3. + * - When cache is shared among all CPUs, we use bm_check + * mechanism as in UP case + * + * This routine is called only after all the CPUs are online + */ +void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, + unsigned int cpu) +{ + struct cpuinfo_arm *c = &cpu_data(cpu); + + flags->bm_check = 0; + flags->bm_control = 0; + + /* To Do */ +} +EXPORT_SYMBOL(acpi_processor_power_init_bm_check); + +int acpi_processor_ffh_cstate_probe(unsigned int cpu, + struct acpi_processor_cx *cx, struct acpi_power_register *reg) +{ + /* To Do */ + return 0; +} +EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); + +void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) +{ + cpu_pm_enter(); + /* + * Pass idle state index to cpu_suspend which in turn will call + * the CPU ops suspend protocol with idle index as a parameter. + * + * Some states would not require context to be saved and flushed + * to DRAM, so calling cpu_suspend would not be stricly necessary. 
+ * When power domains specifications for ARM CPUs are finalized then + * this code can be optimized to prevent saving registers if not + * needed. + */ + cpu_suspend(cx->type); + + cpu_pm_exit(); +} +EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 6391485..8ce1610 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -94,6 +94,12 @@ void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); EXPORT_SYMBOL_GPL(arm_pm_restart);
/* + * Idle related variables and functions + */ +unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; +EXPORT_SYMBOL(boot_option_idle_override); + +/* * This is our default idle handler. */ void arch_cpu_idle(void) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index fae22f4..3dca36d 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -70,10 +70,8 @@ static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
static int disabled_by_idle_boot_param(void) { -#ifdef CONFIG_X86 return boot_option_idle_override == IDLE_POLL || boot_option_idle_override == IDLE_HALT; -#endif }
/* @@ -120,9 +118,7 @@ static struct dmi_system_id processor_power_dmi_table[] = { static void acpi_safe_halt(void) { if (!tif_need_resched()) { -#ifdef CONFIG_X86 safe_halt(); -#endif local_irq_disable(); } } @@ -437,7 +433,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) } else { continue; } -#ifdef CONFIG_X86 if (cx.type == ACPI_STATE_C1 && (boot_option_idle_override == IDLE_NOMWAIT)) { /* @@ -453,7 +448,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) cx.entry_method = ACPI_CSTATE_HALT; snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); } -#endif } else { snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", cx.address); @@ -754,11 +748,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) while (1) {
if (cx->entry_method == ACPI_CSTATE_HALT) -#ifdef CONFIG_X86 safe_halt(); -#else - ; -#endif else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { inb(cx->address); /* See comment in acpi_idle_do_entry() */ @@ -853,9 +843,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, } }
-#ifdef CONFIG_X86 acpi_unlazy_tlb(smp_processor_id()); -#endif
/* Tell the scheduler that we are going deep-idle: */ sched_clock_idle_sleep_event(); @@ -1133,9 +1121,7 @@ int acpi_processor_power_init(struct acpi_processor *pr)
if (!first_run) { dmi_check_system(processor_power_dmi_table); -#ifdef CONFIG_X86 max_cstate = acpi_processor_cstate_check(max_cstate); -#endif if (max_cstate < ACPI_C_STATES_MAX) printk(KERN_NOTICE "ACPI: processor limited to max C-state %d\n",