I would like to support ACPI cpuidle for ARM.
For that I applied Lorenzo Pieralisi's patches on leg-kernel repository.
[PATCH RFC v2 0/4] ARM generic idle states]
[PATCH RFC v2 1/4] drivers: cpuidle: implement OF based idle states
infrastructure
[PATCH RFC v2 2/4] arm64: add PSCI CPU_SUSPEND based cpu_suspend support
[PATCH RFC v2 3/4] drivers: cpuidle: CPU idle ARM64 driver
[PATCH RFC v2 4/4] arm64: boot: dts: update rtsm aemv8 dts with PSCI and
idle states
Lorenzo Pieralisi's patches used PSCI method to enter the low power mode.
(Over C2 State)
I used that method in processor_idle.c
Last commit :
Commit a493444ce7f1792b44897160454149dc31ca208b
Author: Graeme Gregory graeme.gregory@linaro.org
Date:   Tue Feb 11 09:21:17 2014 +0000
     linaro-configs: add enterprise-distro.conf
     Signed-off-by: Graeme Gregory graeme.gregory@linaro.org
For applying this patch, leg-kernel + [RFC] cpufreq: Add ACPI cpufreq
support for ARM(My patch) + Lorenzo Pieralisi's patches should be merged.
Some points might require fixing, so I would like to get your reviews and
comments.
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index dff95da..3b0ace4 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -85,6 +85,14 @@ void arch_fix_phys_package_id(int num, u32 slot);
 extern int (*acpi_suspend_lowlevel)(void);
 #define acpi_wakeup_address (0)
+/*
+ * Check if the CPU can handle C2 and deeper
+ */
+static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
+{
+	return max_cstate;
+}
+
 /* map logic cpu id to physical GIC id */
 extern int arm_cpu_to_apicid[NR_CPUS];
 #define cpu_physical_id(cpu) arm_cpu_to_apicid[cpu]
@@ -110,4 +118,5 @@ static inline int cpu_acpi_read_ops(int cpu)
+#define acpi_unlazy_tlb(x)	do { } while(0)
 #endif /*_ASM_ARM64_ACPI_H*/
diff --git a/arch/arm64/include/asm/irqflags.h
b/arch/arm64/include/asm/irqflags.h
index b2fcfbc..758c4e9 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -19,6 +19,7 @@
 #ifdef __KERNEL__
/*
  * CPU interrupt mask handling.
@@ -90,5 +91,11 @@ static inline int arch_irqs_disabled_flags(unsigned long
flags)
    return flags & PSR_I_BIT;
 }
+static inline void arch_safe_halt(void)
+{
+	arch_local_irq_enable();
+	wfi();
+}
+
 #endif
 #endif
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0556a0a..c0e931f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -34,8 +34,10 @@
 #include <linux/sched.h>       /* need_resched() */
 #include <linux/clockchips.h>
 #include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
 #include <linux/syscore_ops.h>
 #include <acpi/processor.h>
+#include <asm/suspend.h>
/*
  * Include the apic definitions for x86 to have the APIC timer related
defines
@@ -70,10 +72,8 @@ static DEFINE_PER_CPU(struct acpi_processor_cx *
[CPUIDLE_STATE_MAX],
static int disabled_by_idle_boot_param(void)
 {
-#ifdef CONFIG_X86
    return boot_option_idle_override == IDLE_POLL ||
    	boot_option_idle_override == IDLE_HALT;
-#endif
 }
/*
@@ -120,9 +120,7 @@ static struct dmi_system_id processor_power_dmi_table[]
= {
 static void acpi_safe_halt(void)
 {
    if (!tif_need_resched()) {
-#ifdef CONFIG_X86
    	safe_halt();
-#endif
    	local_irq_disable();
    }
 }
@@ -752,11 +750,7 @@ static int acpi_idle_play_dead(struct cpuidle_device
*dev, int index)
    while (1) {
if (cx->entry_method == ACPI_CSTATE_HALT)
-#ifdef CONFIG_X86
    		safe_halt();
-#else
-		acpi_safe_halt();
-#endif
    	else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
    		inb(cx->address);
    		/* See comment in acpi_idle_do_entry() */
@@ -769,6 +763,7 @@ static int acpi_idle_play_dead(struct cpuidle_device
*dev, int index)
    return 0;
 }
+#ifdef CONFIG_X86
 /**
  * acpi_idle_enter_simple - enters an ACPI state without BM handling
  * @dev: the target CPU
@@ -811,6 +806,41 @@ static int acpi_idle_enter_simple(struct cpuidle_device
*dev,
    lapic_timer_state_broadcast(pr, cx, 0);
    return index;
 }
+#else
+/**
+ * acpi_idle_enter_state - enters an acpi state without bm handling
+ * @dev: the target cpu
+ * @drv: cpuidle driver with cpuidle state information
+ * @index: the index of suggested state
+ */
+static int acpi_idle_enter_simple(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index)
+{
+	int ret;
+
+	if (!index) {
+		cpu_do_idle();
+		return index;
+	}
+
+	cpu_pm_enter();
+	/*
+	 * Pass idle state index to cpu_suspend which in turn will call
+	 * the CPU ops suspend protocol with idle index as a parameter.
+	 *
+	 * Some states would not require context to be saved and flushed
+	 * to DRAM, so calling cpu_suspend would not be stricly necessary.
+	 * When power domains specifications for ARM CPUs are finalized then
+	 * this code can be optimized to prevent saving registers if not
+	 * needed.
+	 */
+	ret = cpu_suspend(index);
+
+	cpu_pm_exit();
+
+	return ret ? -1 : index;
+}
+#endif
static int c3_cpu_count;
 static DEFINE_RAW_SPINLOCK(c3_lock);
@@ -851,9 +881,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device
*dev,
    	}
    }
/* Tell the scheduler that we are going deep-idle: */
    sched_clock_idle_sleep_event();
@@ -1131,9 +1159,7 @@ int acpi_processor_power_init(struct acpi_processor
*pr)
if (!first_run) {
    	dmi_check_system(processor_power_dmi_table);
-#ifdef CONFIG_X86
    	max_cstate = acpi_processor_cstate_check(max_cstate);
-#endif
    	if (max_cstate < ACPI_C_STATES_MAX)
    		printk(KERN_NOTICE
    		       "ACPI: processor limited to max C-state
%d\n",
-- 
1.7.10.4
> -----Original Message-----
> From: Jonghwan Choi [mailto:jhbird.choi@samsung.com]
> Sent: Monday, April 14, 2014 2:54 PM
> To: Jonghwan Choi (jhbird.choi@gmail.com); sbkim73@samsung.com
> Cc: 'linaro-acpi@lists.linaro.org'
> Subject: [PATCH linaro-acpi] [RFC] cpufreq: Add ACPI cpufreq support for
> ARM
> 
> Hi all,
> 
> I would like to support acpi-cpufreq for ARM.
> For that, firstly, I made a new file, acpi-cpufreq-arm.c.
> But some people, such as Rafael, Sudeep and Hanjun Guo, worried about
that.
> What they are worried about is that there is too much duplicate code between
> acpi-cpufreq.c and acpi-cpufreq-arm.c. So, I tried to separate this code
> into 3 parts, which are common, x86-specific and arm-specific code.
> I tried to separate soc-specific code from acpi-cpufreq.c as much as
> possible.
> But as you know, there is too much x86-specific code in acpi-cpufreq.c
> When I tried to implement acpi-cpufreq-arm, there were a lot of compile
> error(due to x86-specific code).
> So I used #ifdef CONFIG_X86 to solve those errors.(and some hack codes) In
> this patch, I mostly focused on cpufreq.
> Later I will remove #ifdef CONFIG_X86.
> I would like your reviews and comments.
> 
> This patch is  based on
> "
http://git.linaro.org/leg/acpi/leg-kernel.git"
> 
> Last commit :
> commit a493444ce7f1792b44897160454149dc31ca208b
> Author: Graeme Gregory 
graeme.gregory@linaro.org
> Date:   Tue Feb 11 09:21:17 2014 +0000
>     linaro-configs: add enterprise-distro.conf
>     Signed-off-by: Graeme Gregory graeme.gregory@linaro.org
> 
> 
> Thanks
> Best Regards
> 
> Signed-off-by: Jonghwan Choi 
jhbird.choi@samsung.com
> ---
>  arch/arm64/Kconfig                 |   12 ++
>  arch/arm64/include/asm/cpu.h       |    3 +-
>  arch/arm64/include/asm/processor.h |    2 +
>  arch/arm64/kernel/process.c        |    6 +
>  arch/arm64/kernel/setup.c          |    5 +-
>  drivers/acpi/Kconfig               |    3 +-
>  drivers/acpi/acpi_processor.c      |    4 +
>  drivers/acpi/processor_idle.c      |   12 ++
>  drivers/cpufreq/Kconfig            |    2 +-
>  drivers/cpufreq/Kconfig.arm        |   16 ++
>  drivers/cpufreq/Makefile           |    3 +-
>  drivers/cpufreq/acpi-cpufreq.c     |  419
+++++----------------------------
> ---
>  drivers/cpufreq/acpi-cpufreq.h     |   68 ++++++
>  drivers/cpufreq/acpi-cpufreq_arm.c |   88 ++++++++
>  drivers/cpufreq/acpi-cpufreq_x86.c |  334 ++++++++++++++++++++++++++++
>  include/linux/acpi.h               |    1 +
>  16 files changed, 604 insertions(+), 373 deletions(-)  create mode 100644
> drivers/cpufreq/acpi-cpufreq.h  create mode 100644 drivers/cpufreq/acpi-
> cpufreq_arm.c
>  create mode 100644 drivers/cpufreq/acpi-cpufreq_x86.c
> 
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index
> 94cc542..e150e60 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -108,6 +108,13 @@ config IOMMU_HELPER  config KERNEL_MODE_NEON
>  	def_bool y
> 
> +config ARCH_HAS_CPUFREQ
> +	bool
> +	help
> +	  Internal node to signify that the ARCH has CPUFREQ support
> +	  and that the relevant menu configurations are displayed for
> +	  it.
> +
>  source "init/Kconfig"
> 
>  source "kernel/Kconfig.freezer"
> @@ -116,6 +123,7 @@ menu "Platform selection"
> 
>  config ARCH_VEXPRESS
>  	bool "ARMv8 software model (Versatile Express)"
> +	select ARCH_HAS_CPUFREQ
>  	select ARCH_REQUIRE_GPIOLIB
>  	select COMMON_CLK_VERSATILE
>  	select POWER_RESET_VEXPRESS
> @@ -325,6 +333,10 @@ endmenu
> 
>  menu "CPU Power Management"
> 
> +if ARCH_HAS_CPUFREQ
> +source "drivers/cpufreq/Kconfig"
> +endif
> +
>  source "drivers/cpuidle/Kconfig"
> 
>  endmenu
> diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
> index 8625eb1..69db3e7 100644
> --- a/arch/arm64/include/asm/cpu.h
> +++ b/arch/arm64/include/asm/cpu.h
> @@ -20,6 +20,7 @@ struct cpuinfo_arm {
>  #endif
>  };
> 
> -DECLARE_PER_CPU(struct cpuinfo_arm, cpu_data);
> +DECLARE_PER_CPU(struct cpuinfo_arm, cpu_info);
> +#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
> 
>  #endif
> diff --git a/arch/arm64/include/asm/processor.h
> b/arch/arm64/include/asm/processor.h
> index 50ce951..7196873 100644
> --- a/arch/arm64/include/asm/processor.h
> +++ b/arch/arm64/include/asm/processor.h
> @@ -47,6 +47,8 @@
>  #define ARCH_LOW_ADDRESS_LIMIT	PHYS_MASK
>  #endif /* __KERNEL__ */
> 
> +enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
> +			 IDLE_POLL};
>  struct debug_info {
>  	/* Have we suspended stepping by a debugger? */
>  	int			suspended_step;
> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
> index 1c0a9be..1c985a9 100644
> --- a/arch/arm64/kernel/process.c
> +++ b/arch/arm64/kernel/process.c
> @@ -86,6 +86,12 @@ void (*arm_pm_restart)(enum reboot_mode reboot_mode,
> const char *cmd);  EXPORT_SYMBOL_GPL(arm_pm_restart);
> 
>  /*
> + * Idle related variables and functions  */ unsigned long
> +boot_option_idle_override = IDLE_NO_OVERRIDE;
> +EXPORT_SYMBOL(boot_option_idle_override);
> +
> +/*
>   * This is our default idle handler.
>   */
>  void arch_cpu_idle(void)
> diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index
> fecf272..99b973b 100644
> --- a/arch/arm64/kernel/setup.c
> +++ b/arch/arm64/kernel/setup.c
> @@ -377,14 +377,15 @@ static int __init arm64_device_init(void)  }
> arch_initcall(arm64_device_init);
> 
> -static DEFINE_PER_CPU(struct cpu, cpu_data);
> +DEFINE_PER_CPU(struct cpu, cpu_info);
> +EXPORT_PER_CPU_SYMBOL(cpu_info);
> 
>  static int __init topology_init(void)
>  {
>  	int i;
> 
>  	for_each_possible_cpu(i) {
> -		struct cpu *cpu = &per_cpu(cpu_data, i);
> +		struct cpu *cpu = &per_cpu(cpu_info, i);
>  		cpu->hotpluggable = 1;
>  		register_cpu(cpu, i);
>  	}
> diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index
> cbc5dfc..961211f 100644
> --- a/drivers/acpi/Kconfig
> +++ b/drivers/acpi/Kconfig
> @@ -152,7 +152,7 @@ config ACPI_PROCESSOR
>  	tristate "Processor"
>  	select THERMAL
>  	select CPU_IDLE
> -	depends on X86 || IA64
> +	depends on X86 || IA64 || ARM64
>  	default y
>  	help
>  	  This driver installs ACPI as the idle handler for Linux and uses
> diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
> index c29c2c3..6a89856 100644
> --- a/drivers/acpi/acpi_processor.c
> +++ b/drivers/acpi/acpi_processor.c
> @@ -177,6 +177,7 @@ static int acpi_processor_hotadd_init(struct
> acpi_processor *pr)
>  	cpu_maps_update_begin();
> 	cpu_hotplug_begin();
> 
> +#ifdef CONFIG_X86
>  	ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
>  	if (ret)
>  		goto out;
> @@ -186,6 +187,7 @@ static int acpi_processor_hotadd_init(struct
> acpi_processor *pr)
>  		acpi_unmap_lsapic(pr->id);
>  		goto out;
>  	}
> +#endif
> 
>  	/*
>  	 * CPU got hot-added, but cpu_data is not initialized yet.  Set a
> flag @@ -457,8 +459,10 @@ static void acpi_processor_remove(struct
> acpi_device *device)
>  	cpu_hotplug_begin();
> 
>  	/* Remove the CPU. */
> +#ifdef CONFIG_X86
>  	arch_unregister_cpu(pr->id);
>  	acpi_unmap_lsapic(pr->id);
> +#endif
> 
>  	cpu_hotplug_done();
>  	cpu_maps_update_done();
> diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
> index 3dca36d..0556a0a 100644
> --- a/drivers/acpi/processor_idle.c
> +++ b/drivers/acpi/processor_idle.c
> @@ -70,8 +70,10 @@ static DEFINE_PER_CPU(struct acpi_processor_cx *
> [CPUIDLE_STATE_MAX],
> 
>  static int disabled_by_idle_boot_param(void)  {
> +#ifdef CONFIG_X86
>  	return boot_option_idle_override == IDLE_POLL ||
>  		boot_option_idle_override == IDLE_HALT;
> +#endif
>  }
> 
>  /*
> @@ -118,7 +120,9 @@ static struct dmi_system_id
processor_power_dmi_table[]
> = {  static void acpi_safe_halt(void)  {
>  	if (!tif_need_resched()) {
> +#ifdef CONFIG_X86
>  		safe_halt();
> +#endif
>  		local_irq_disable();
>  	}
>  }
> @@ -748,7 +752,11 @@ static int acpi_idle_play_dead(struct cpuidle_device
> *dev, int index)
>  	while (1) {
> 
>  		if (cx->entry_method == ACPI_CSTATE_HALT)
> +#ifdef CONFIG_X86
>  			safe_halt();
> +#else
> +		acpi_safe_halt();
> +#endif
>  		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
>  			inb(cx->address);
>  			/* See comment in acpi_idle_do_entry() */ @@ -843,7
> +851,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
>  		}
>  	}
> 
> +#ifdef CONFIG_X86
>  	acpi_unlazy_tlb(smp_processor_id());
> +#endif
> 
>  	/* Tell the scheduler that we are going deep-idle: */
>  	sched_clock_idle_sleep_event();
> @@ -1121,7 +1131,9 @@ int acpi_processor_power_init(struct acpi_processor
> *pr)
> 
>  	if (!first_run) {
>  		dmi_check_system(processor_power_dmi_table);
> +#ifdef CONFIG_X86
>  		max_cstate = acpi_processor_cstate_check(max_cstate);
> +#endif
>  		if (max_cstate < ACPI_C_STATES_MAX)
>  			printk(KERN_NOTICE
>  			       "ACPI: processor limited to max C-state
%d\n",
> diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index
> 3a7202d..6417406 100644
> --- a/drivers/cpufreq/Kconfig
> +++ b/drivers/cpufreq/Kconfig
> @@ -228,7 +228,7 @@ source "drivers/cpufreq/Kconfig.x86"
>  endmenu
> 
>  menu "ARM CPU frequency scaling drivers"
> -depends on ARM
> +depends on ARM64
>  source "drivers/cpufreq/Kconfig.arm"
>  endmenu
> 
> diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
> index 3129749..ef00a4c 100644
> --- a/drivers/cpufreq/Kconfig.arm
> +++ b/drivers/cpufreq/Kconfig.arm
> @@ -16,6 +16,21 @@ config ARM_DT_BL_CPUFREQ
>  	  This enables probing via DT for Generic CPUfreq driver for ARM
>  	  big.LITTLE platform. This gets frequency tables from DT.
> 
> +config ARM_ACPI_CPUFREQ
> +	tristate "ACPI Processor P-States driver"
> +	depends on ACPI_PROCESSOR
> +	help
> +	  This driver adds a CPUFreq driver which utilizes the ACPI
> +	  Processor Performance States.
> +	  This driver also supports ARM CPUs.
> +
> +	  To compile this driver as a module, choose M here: the
> +	  module will be called acpi-cpufreq.
> +
> +	  For details, take a look at 
file:Documentation/cpu-freq/.
> +
> +	  If in doubt, say N.
> +
>  config ARM_EXYNOS_CPUFREQ
>  	bool
> 
> @@ -249,3 +264,4 @@ config ARM_VEXPRESS_SPC_CPUFREQ
>          help
>            This add the CPUfreq driver support for Versatile Express
>  	  big.LITTLE platforms using SPC for power management.
> +
> diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index
> 0fd80cb..83e44f6 100644
> --- a/drivers/cpufreq/Makefile
> +++ b/drivers/cpufreq/Makefile
> @@ -21,7 +21,8 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)	+= cpufreq-cpu0.o
>  # powernow-k8 can load then. ACPI is preferred to all other hardware-
> specific drivers.
>  # speedstep-* is preferred over p4-clockmod.
> 
> -obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o
> +obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o acpi-
> cpufreq_x86.o
> +obj-$(CONFIG_ARM_ACPI_CPUFREQ)		+= acpi-cpufreq.o acpi-
> cpufreq_arm.o
>  obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o
>  obj-$(CONFIG_X86_PCC_CPUFREQ)		+= pcc-cpufreq.o
>  obj-$(CONFIG_X86_POWERNOW_K6)		+= powernow-k6.o
> diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-
> cpufreq.c index 18448a7..26bf7112 100644
> --- a/drivers/cpufreq/acpi-cpufreq.c
> +++ b/drivers/cpufreq/acpi-cpufreq.c
> @@ -42,37 +42,17 @@
> 
>  #include <acpi/processor.h>
> 
> -#include <asm/msr.h>
> +#include <asm/cpu.h>
>  #include <asm/processor.h>
> -#include <asm/cpufeature.h>
> +
> +#include "acpi-cpufreq.h"
> 
>  MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
> MODULE_DESCRIPTION("ACPI Processor P-States Driver");
> MODULE_LICENSE("GPL");
> 
> -#define PFX "acpi-cpufreq: "
> -
> -enum {
> -	UNDEFINED_CAPABLE = 0,
> -	SYSTEM_INTEL_MSR_CAPABLE,
> -	SYSTEM_AMD_MSR_CAPABLE,
> -	SYSTEM_IO_CAPABLE,
> -};
> -
> -#define INTEL_MSR_RANGE		(0xffff)
> -#define AMD_MSR_RANGE		(0x7)
> -
> -#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
> -
> -struct acpi_cpufreq_data {
> -	struct acpi_processor_performance *acpi_data;
> -	struct cpufreq_frequency_table *freq_table;
> -	unsigned int resume;
> -	unsigned int cpu_feature;
> -	cpumask_var_t freqdomain_cpus;
> -};
> -
>  static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
> +static struct acpi_cpufreq_common *acpi;
> 
>  /* acpi_perf_data is a pointer to percpu data. */  static struct
> acpi_processor_performance __percpu *acpi_perf_data; @@ -80,62 +60,12 @@
> static struct acpi_processor_performance __percpu *acpi_perf_data;  static
> struct cpufreq_driver acpi_cpufreq_driver;
> 
>  static unsigned int acpi_pstate_strict; -static struct msr __percpu
*msrs;
> -
> -static bool boost_state(unsigned int cpu) -{
> -	u32 lo, hi;
> -	u64 msr;
> -
> -	switch (boot_cpu_data.x86_vendor) {
> -	case X86_VENDOR_INTEL:
> -		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
> -		msr = lo | ((u64)hi << 32);
> -		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
> -	case X86_VENDOR_AMD:
> -		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
> -		msr = lo | ((u64)hi << 32);
> -		return !(msr & MSR_K7_HWCR_CPB_DIS);
> -	}
> -	return false;
> -}
> -
> -static void boost_set_msrs(bool enable, const struct cpumask *cpumask) -{
> -	u32 cpu;
> -	u32 msr_addr;
> -	u64 msr_mask;
> -
> -	switch (boot_cpu_data.x86_vendor) {
> -	case X86_VENDOR_INTEL:
> -		msr_addr = MSR_IA32_MISC_ENABLE;
> -		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
> -		break;
> -	case X86_VENDOR_AMD:
> -		msr_addr = MSR_K7_HWCR;
> -		msr_mask = MSR_K7_HWCR_CPB_DIS;
> -		break;
> -	default:
> -		return;
> -	}
> -
> -	rdmsr_on_cpus(cpumask, msr_addr, msrs);
> -
> -	for_each_cpu(cpu, cpumask) {
> -		struct msr *reg = per_cpu_ptr(msrs, cpu);
> -		if (enable)
> -			reg->q &= ~msr_mask;
> -		else
> -			reg->q |= msr_mask;
> -	}
> -
> -	wrmsr_on_cpus(cpumask, msr_addr, msrs);
> -}
> 
>  static int _store_boost(int val)
>  {
>  	get_online_cpus();
> -	boost_set_msrs(val, cpu_online_mask);
> +	if (acpi->ops->update_boost)
> +		acpi->ops->update_boost(val, cpu_online_mask);
>  	put_online_cpus();
>  	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
> 
> @@ -151,13 +81,12 @@ static ssize_t show_freqdomain_cpus(struct
> cpufreq_policy *policy, char *buf)
> 
>  cpufreq_freq_attr_ro(freqdomain_cpus);
> 
> -#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
>  static ssize_t store_boost(const char *buf, size_t count)  {
>  	int ret;
>  	unsigned long val = 0;
> 
> -	if (!acpi_cpufreq_driver.boost_supported)
> +	if (!acpi->boost_supported)
>  		return -EINVAL;
> 
>  	ret = kstrtoul(buf, 10, &val);
> @@ -181,131 +110,23 @@ static ssize_t show_cpb(struct cpufreq_policy
> *policy, char *buf)  }
> 
>  cpufreq_freq_attr_rw(cpb);
> -#endif
> -
> -static int check_est_cpu(unsigned int cpuid) -{
> -	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
> -
> -	return cpu_has(cpu, X86_FEATURE_EST);
> -}
> -
> -static int check_amd_hwpstate_cpu(unsigned int cpuid) -{
> -	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
> -
> -	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
> -}
> -
> -static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) -{
> -	struct acpi_processor_performance *perf;
> -	int i;
> -
> -	perf = data->acpi_data;
> -
> -	for (i = 0; i < perf->state_count; i++) {
> -		if (value == perf->states[i].status)
> -			return data->freq_table[i].frequency;
> -	}
> -	return 0;
> -}
> -
> -static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) -{
> -	int i;
> -	struct acpi_processor_performance *perf;
> -
> -	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
> -		msr &= AMD_MSR_RANGE;
> -	else
> -		msr &= INTEL_MSR_RANGE;
> -
> -	perf = data->acpi_data;
> -
> -	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
> {
> -		if (msr == perf->states[data-
> >freq_table[i].driver_data].status)
> -			return data->freq_table[i].frequency;
> -	}
> -	return data->freq_table[0].frequency;
> -}
> -
> -static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) -{
> -	switch (data->cpu_feature) {
> -	case SYSTEM_INTEL_MSR_CAPABLE:
> -	case SYSTEM_AMD_MSR_CAPABLE:
> -		return extract_msr(val, data);
> -	case SYSTEM_IO_CAPABLE:
> -		return extract_io(val, data);
> -	default:
> -		return 0;
> -	}
> -}
> -
> -struct msr_addr {
> -	u32 reg;
> -};
> -
> -struct io_addr {
> -	u16 port;
> -	u8 bit_width;
> -};
> -
> -struct drv_cmd {
> -	unsigned int type;
> -	const struct cpumask *mask;
> -	union {
> -		struct msr_addr msr;
> -		struct io_addr io;
> -	} addr;
> -	u32 val;
> -};
> 
>  /* Called via smp_call_function_single(), on the target CPU */  static
> void do_drv_read(void *_cmd)  {
> -	struct drv_cmd *cmd = _cmd;
> -	u32 h;
> +        struct drv_cmd *cmd = _cmd;
> +	struct acpi_cpufreq_data *data = per_cpu(acfreq_data,
> +cpumask_first(cmd->mask));
> 
> -	switch (cmd->type) {
> -	case SYSTEM_INTEL_MSR_CAPABLE:
> -	case SYSTEM_AMD_MSR_CAPABLE:
> -		rdmsr(cmd->addr.msr.reg, cmd->val, h);
> -		break;
> -	case SYSTEM_IO_CAPABLE:
> -		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
> -				&cmd->val,
> -				(u32)cmd->addr.io.bit_width);
> -		break;
> -	default:
> -		break;
> -	}
> +        acpi->ops->drv_read(cmd, data);
>  }
> 
>  /* Called via smp_call_function_many(), on the target CPUs */  static
> void do_drv_write(void *_cmd)  {
>  	struct drv_cmd *cmd = _cmd;
> -	u32 lo, hi;
> +	struct acpi_cpufreq_data *data = per_cpu(acfreq_data,
> +cpumask_first(cmd->mask));
> 
> -	switch (cmd->type) {
> -	case SYSTEM_INTEL_MSR_CAPABLE:
> -		rdmsr(cmd->addr.msr.reg, lo, hi);
> -		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
> -		wrmsr(cmd->addr.msr.reg, lo, hi);
> -		break;
> -	case SYSTEM_AMD_MSR_CAPABLE:
> -		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
> -		break;
> -	case SYSTEM_IO_CAPABLE:
> -		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
> -				cmd->val,
> -				(u32)cmd->addr.io.bit_width);
> -		break;
> -	default:
> -		break;
> -	}
> +        acpi->ops->drv_write(cmd, data);
>  }
> 
>  static void drv_read(struct drv_cmd *cmd) @@ -330,32 +151,13 @@ static
> void drv_write(struct drv_cmd *cmd)
> 
>  static u32 get_cur_val(const struct cpumask *mask)  {
> -	struct acpi_processor_performance *perf;
>  	struct drv_cmd cmd;
> 
>  	if (unlikely(cpumask_empty(mask)))
>  		return 0;
> 
> -	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
> -	case SYSTEM_INTEL_MSR_CAPABLE:
> -		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
> -		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
> -		break;
> -	case SYSTEM_AMD_MSR_CAPABLE:
> -		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
> -		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
> -		break;
> -	case SYSTEM_IO_CAPABLE:
> -		cmd.type = SYSTEM_IO_CAPABLE;
> -		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
> -		cmd.addr.io.port = perf->control_register.address;
> -		cmd.addr.io.bit_width = perf->control_register.bit_width;
> -		break;
> -	default:
> -		return 0;
> -	}
> -
>  	cmd.mask = mask;
> +
>  	drv_read(&cmd);
> 
>  	pr_debug("get_cur_val = %u\n", cmd.val); @@ -377,7 +179,7 @@ static
> unsigned int get_cur_freq_on_cpu(unsigned int cpu)
>  	}
> 
>  	cached_freq = data->freq_table[data->acpi_data->state].frequency;
> -	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
> +	freq = acpi->ops->extract_freq(get_cur_val(cpumask_of(cpu)), data);
>  	if (freq != cached_freq) {
>  		/*
>  		 * The dreaded BIOS frequency change behind our back.
> @@ -398,7 +200,7 @@ static unsigned int check_freqs(const struct cpumask
> *mask, unsigned int freq,
>  	unsigned int i;
> 
>  	for (i = 0; i < 100; i++) {
> -		cur_freq = extract_freq(get_cur_val(mask), data);
> +		cur_freq = acpi->ops->extract_freq(get_cur_val(mask), data);
>  		if (cur_freq == freq)
>  			return 1;
>  		udelay(10);
> @@ -434,34 +236,14 @@ static int acpi_cpufreq_target(struct cpufreq_policy
> *policy,
>  		}
>  	}
> 
> -	switch (data->cpu_feature) {
> -	case SYSTEM_INTEL_MSR_CAPABLE:
> -		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
> -		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
> -		cmd.val = (u32) perf->states[next_perf_state].control;
> -		break;
> -	case SYSTEM_AMD_MSR_CAPABLE:
> -		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
> -		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
> -		cmd.val = (u32) perf->states[next_perf_state].control;
> -		break;
> -	case SYSTEM_IO_CAPABLE:
> -		cmd.type = SYSTEM_IO_CAPABLE;
> -		cmd.addr.io.port = perf->control_register.address;
> -		cmd.addr.io.bit_width = perf->control_register.bit_width;
> -		cmd.val = (u32) perf->states[next_perf_state].control;
> -		break;
> -	default:
> -		result = -ENODEV;
> -		goto out;
> -	}
> -
>  	/* cpufreq holds the hotplug lock, so we are safe from here on */
>  	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
>  		cmd.mask = policy->cpus;
>  	else
>  		cmd.mask = cpumask_of(policy->cpu);
> 
> +	cmd.val = (u32) perf->states[next_perf_state].control;
> +
>  	drv_write(&cmd);
> 
>  	if (acpi_pstate_strict) {
> @@ -484,7 +266,7 @@ static unsigned long  acpi_cpufreq_guess_freq(struct
> acpi_cpufreq_data *data, unsigned int cpu)  {
>  	struct acpi_processor_performance *perf = data->acpi_data;
> -
> +#ifdef CONFIG_X86
>  	if (cpu_khz) {
>  		/* search the closest match to cpu_khz */
>  		unsigned int i;
> @@ -501,11 +283,11 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data
> *data, unsigned int cpu)
>  		}
>  		perf->state = perf->state_count-1;
>  		return freqn;
> -	} else {
> -		/* assume CPU is at P0... */
> -		perf->state = 0;
> -		return perf->states[0].core_frequency * 1000;
>  	}
> +#endif
> +	/* assume CPU is at P0... */
> +	perf->state = 0;
> +	return perf->states[0].core_frequency * 1000;
>  }
> 
>  static void free_acpi_perf_data(void)
> @@ -519,44 +301,6 @@ static void free_acpi_perf_data(void)
>  	free_percpu(acpi_perf_data);
>  }
> 
> -static int boost_notify(struct notifier_block *nb, unsigned long action,
> -		      void *hcpu)
> -{
> -	unsigned cpu = (long)hcpu;
> -	const struct cpumask *cpumask;
> -
> -	cpumask = get_cpu_mask(cpu);
> -
> -	/*
> -	 * Clear the boost-disable bit on the CPU_DOWN path so that
> -	 * this cpu cannot block the remaining ones from boosting. On
> -	 * the CPU_UP path we simply keep the boost-disable flag in
> -	 * sync with the current global state.
> -	 */
> -
> -	switch (action) {
> -	case CPU_UP_PREPARE:
> -	case CPU_UP_PREPARE_FROZEN:
> -		boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
> -		break;
> -
> -	case CPU_DOWN_PREPARE:
> -	case CPU_DOWN_PREPARE_FROZEN:
> -		boost_set_msrs(1, cpumask);
> -		break;
> -
> -	default:
> -		break;
> -	}
> -
> -	return NOTIFY_OK;
> -}
> -
> -
> -static struct notifier_block boost_nb = {
> -	.notifier_call          = boost_notify,
> -};
> -
>  /*
>   * acpi_cpufreq_early_init - initialize ACPI P-States library
>   *
> @@ -618,27 +362,6 @@ static const struct dmi_system_id
> sw_any_bug_dmi_table[] = {
>  	},
>  	{ }
>  };
> -
> -static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) -{
> -	/* Intel Xeon Processor 7100 Series Specification Update
> -	 * 
http://www.intel.com/Assets/PDF/specupdate/314554.pdf
> -	 * AL30: A Machine Check Exception (MCE) Occurring during an
> -	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
> -	 * Both Processor Cores to Lock Up. */
> -	if (c->x86_vendor == X86_VENDOR_INTEL) {
> -		if ((c->x86 == 15) &&
> -		    (c->x86_model == 6) &&
> -		    (c->x86_mask == 8)) {
> -			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
> -			    "Xeon(R) 7100 Errata AL30, processors may "
> -			    "lock up on frequency changes: disabling "
> -			    "acpi-cpufreq.\n");
> -			return -ENODEV;
> -		    }
> -		}
> -	return 0;
> -}
>  #endif
> 
>  static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) @@ -648,7
> +371,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
>  	unsigned int cpu = policy->cpu;
>  	struct acpi_cpufreq_data *data;
>  	unsigned int result = 0;
> -	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
> +	void *c = &cpu_data(policy->cpu);
>  	struct acpi_processor_performance *perf;  #ifdef CONFIG_SMP
>  	static int blacklisted;
> @@ -659,9 +382,11 @@ static int acpi_cpufreq_cpu_init(struct
> cpufreq_policy *policy)  #ifdef CONFIG_SMP
>  	if (blacklisted)
>  		return blacklisted;
> -	blacklisted = acpi_cpufreq_blacklist(c);
> -	if (blacklisted)
> -		return blacklisted;
> +	if (acpi->ops->arch_check) {
> +		blacklisted = acpi->ops->arch_check(c);
> +		if (blacklisted)
> +			return blacklisted;
> +	}
>  #endif
> 
>  	data = kzalloc(sizeof(*data), GFP_KERNEL); @@ -676,7 +401,7 @@
> static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
>  	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
>  	per_cpu(acfreq_data, cpu) = data;
> 
> -	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
> +	if (acpi->const_loops)
>  		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
> 
>  	result = acpi_processor_register_performance(data->acpi_data, cpu);
> @@ -700,13 +425,13 @@ static int acpi_cpufreq_cpu_init(struct
> cpufreq_policy *policy)
>  	dmi_check_system(sw_any_bug_dmi_table);
>  	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
>  		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
> -		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
> +//FIXME		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
>  	}
> 
> -	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
> +	if (acpi->feature_hw_pstate && !acpi_pstate_strict) {
>  		cpumask_clear(policy->cpus);
>  		cpumask_set_cpu(cpu, policy->cpus);
> -		cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu));
> +//FIXME		cpumask_copy(data->freqdomain_cpus,
> cpu_sibling_mask(cpu));
>  		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
>  		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
>  	}
> @@ -724,32 +449,7 @@ static int acpi_cpufreq_cpu_init(struct
> cpufreq_policy *policy)
>  		goto err_unreg;
>  	}
> 
> -	switch (perf->control_register.space_id) {
> -	case ACPI_ADR_SPACE_SYSTEM_IO:
> -		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
> -		    boot_cpu_data.x86 == 0xf) {
> -			pr_debug("AMD K8 systems must use native
drivers.\n");
> -			result = -ENODEV;
> -			goto err_unreg;
> -		}
> -		pr_debug("SYSTEM IO addr space\n");
> -		data->cpu_feature = SYSTEM_IO_CAPABLE;
> -		break;
> -	case ACPI_ADR_SPACE_FIXED_HARDWARE:
> -		pr_debug("HARDWARE addr space\n");
> -		if (check_est_cpu(cpu)) {
> -			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
> -			break;
> -		}
> -		if (check_amd_hwpstate_cpu(cpu)) {
> -			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
> -			break;
> -		}
> -		result = -ENODEV;
> -		goto err_unreg;
> -	default:
> -		pr_debug("Unknown addr space %d\n",
> -			(u32) (perf->control_register.space_id));
> +	if (acpi->ops->arch_feature(data)) {
>  		result = -ENODEV;
>  		goto err_unreg;
>  	}
> @@ -899,34 +599,14 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
> 
>  static void __init acpi_cpufreq_boost_init(void)
>  {
> -	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))
> {
> -		msrs = msrs_alloc();
> -
> -		if (!msrs)
> -			return;
> -
> -		acpi_cpufreq_driver.boost_supported = true;
> -		acpi_cpufreq_driver.boost_enabled = boost_state(0);
> -		get_online_cpus();
> -
> -		/* Force all MSRs to the same value */
> -		boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
> -			       cpu_online_mask);
> -
> -		register_cpu_notifier(&boost_nb);
> -
> -		put_online_cpus();
> -	}
> +	if (acpi->ops->arch_boost_init)
> +		acpi->ops->arch_boost_init(acpi);
>  }
> 
>  static void acpi_cpufreq_boost_exit(void)
>  {
> -	if (msrs) {
> -		unregister_cpu_notifier(&boost_nb);
> -
> -		msrs_free(msrs);
> -		msrs = NULL;
> -	}
> +	if (acpi->ops->arch_boost_exit)
> +		acpi->ops->arch_boost_exit();
>  }
> 
>  static int __init acpi_cpufreq_init(void)
> @@ -946,14 +626,23 @@ static int __init acpi_cpufreq_init(void)
>  	if (ret)
>  		return ret;
> 
> -#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
> +	acpi = kzalloc(sizeof(*acpi), GFP_KERNEL);
> +	if (!acpi)
> +		return -ENOMEM;
> +
> +	if (arch_acpi_cpufreq_init(acpi)) { //FIXME
> +		kfree(acpi);
> +		return -ENOMEM;
> +	}
> +
> +
>  	/* this is a sysfs file with a strange name and an even stranger
>  	 * semantic - per CPU instantiation, but system global effect.
>  	 * Lets enable it only on AMD CPUs for compatibility reasons and
>  	 * only if configured. This is considered legacy code, which
>  	 * will probably be removed at some point in the future.
>  	 */
> -	if (check_amd_hwpstate_cpu(0)) {
> +	if (acpi->feature_hw_pstate) {
>  		struct freq_attr **iter;
> 
>  		pr_debug("adding sysfs entry for cpb\n");
> @@ -965,11 +654,12 @@ static int __init acpi_cpufreq_init(void)
>  		if (iter[1] == NULL)
>  			*iter = &cpb;
>  	}
> -#endif
> +
>  	acpi_cpufreq_boost_init();
> 
>  	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
>  	if (ret) {
> +		kfree(acpi);
>  		free_acpi_perf_data();
>  		acpi_cpufreq_boost_exit();
>  	}
> @@ -985,6 +675,8 @@ static void __exit acpi_cpufreq_exit(void)
>  	cpufreq_unregister_driver(&acpi_cpufreq_driver);
> 
>  	free_acpi_perf_data();
> +
> +	kfree(acpi);
>  }
> 
>  module_param(acpi_pstate_strict, uint, 0644);
> @@ -995,13 +687,6 @@ MODULE_PARM_DESC(acpi_pstate_strict,
>  late_initcall(acpi_cpufreq_init);
>  module_exit(acpi_cpufreq_exit);
> 
> -static const struct x86_cpu_id acpi_cpufreq_ids[] = {
> -	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
> -	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
> -	{}
> -};
> -MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
> -
>  static const struct acpi_device_id processor_device_ids[] = {
>  	{ACPI_PROCESSOR_OBJECT_HID, },
>  	{ACPI_PROCESSOR_DEVICE_HID, },
> diff --git a/drivers/cpufreq/acpi-cpufreq.h b/drivers/cpufreq/acpi-cpufreq.h
> new file mode 100644
> index 0000000..2e10c0e
> --- /dev/null
> +++ b/drivers/cpufreq/acpi-cpufreq.h
> @@ -0,0 +1,68 @@
> +#ifndef _ACPI_CPUFREQ_H
> +#define _ACPI_CPUFREQ_H
> +
> +#define PFX "acpi-cpufreq: "
> +
> +enum {
> +	UNDEFINED_CAPABLE = 0,
> +	SYSTEM_INTEL_MSR_CAPABLE,
> +	SYSTEM_AMD_MSR_CAPABLE,
> +	SYSTEM_IO_CAPABLE,
> +	SYSTEM_MEMORY_CAPABLE,
> +};
> +
> +struct msr_addr {
> +	u32 reg;
> +};
> +
> +struct io_addr {
> +	u16 port;
> +	u32 bit_width;
> +};
> +
> +struct mem_addr {
> +	u64 addr;
> +	u32 bit_width;
> +};
> +
> +struct drv_cmd {
> +	const struct cpumask *mask;
> +	union {
> +		struct msr_addr msr;
> +		struct io_addr io;
> +		struct mem_addr mem;
> +	} addr;
> +	u32 val;
> +};
> +
> +struct acpi_cpufreq_data;
> +struct acpi_cpufreq_common;
> +
> +struct acpi_cpufreq_ops {
> +	int (*arch_check)(void *info);
> +	int (*arch_feature)(struct acpi_cpufreq_data *data);
> +	void (*arch_boost_init)(struct acpi_cpufreq_common *acpi);
> +	void (*arch_boost_exit)(void);
> +	void (*drv_read)(void *cmd, struct acpi_cpufreq_data *data);
> +	void (*drv_write)(void *cmd, struct acpi_cpufreq_data *data);
> +	void (*update_boost)(bool enable, const struct cpumask *cpumask);
> +	unsigned (*extract_freq)(u32 val, struct acpi_cpufreq_data *data);
> +};
> +
> +struct acpi_cpufreq_data {
> +	struct acpi_processor_performance *acpi_data;
> +	struct cpufreq_frequency_table *freq_table;
> +	unsigned int resume;
> +	unsigned int cpu_feature;
> +	cpumask_var_t freqdomain_cpus;
> +};
> +
> +struct acpi_cpufreq_common {
> +	struct acpi_cpufreq_ops *ops;
> +	bool boost_supported;
> +	bool feature_hw_pstate;
> +	bool const_loops;
> +};
> +
> +int arch_acpi_cpufreq_init(struct acpi_cpufreq_common *common);
> +#endif	/* _ACPI_CPUFREQ_H */
> diff --git a/drivers/cpufreq/acpi-cpufreq_arm.c b/drivers/cpufreq/acpi-cpufreq_arm.c
> new file mode 100644
> index 0000000..241c2a1
> --- /dev/null
> +++ b/drivers/cpufreq/acpi-cpufreq_arm.c
> @@ -0,0 +1,88 @@
> +#include <linux/acpi.h>
> +#include <linux/cpu.h>
> +#include <linux/io.h>
> +#include <linux/delay.h>
> +#include <linux/uaccess.h>
> +
> +#include <acpi/processor.h>
> +
> +#include <asm/cpu.h>
> +#include <asm/processor.h>
> +
> +#include "acpi-cpufreq.h"
> +
> +/* Called via smp_call_function_single(), on the target CPU */
> +static void arm_do_drv_read(void *_cmd, struct acpi_cpufreq_data *data)
> +{
> +	struct drv_cmd *cmd = _cmd;
> +
> +	if (data->cpu_feature != SYSTEM_MEMORY_CAPABLE)
> +		return;
> +
> +	acpi_os_read_memory((acpi_physical_address)cmd->addr.mem.addr,
> +			(u64*)&cmd->val,
> +			cmd->addr.mem.bit_width);
> +}
> +
> +/* Called via smp_call_function_many(), on the target CPUs */
> +static void arm_do_drv_write(void *_cmd, struct acpi_cpufreq_data *data)
> +{
> +	struct drv_cmd *cmd = _cmd;
> +
> +	if (data->cpu_feature != SYSTEM_MEMORY_CAPABLE)
> +		return;
> +
> +	acpi_os_write_memory((acpi_physical_address)cmd->addr.mem.addr,
> +			(u64)cmd->val,
> +			cmd->addr.mem.bit_width);
> +}
> +
> +static unsigned arm_extract_io(u32 value, struct acpi_cpufreq_data *data)
> +{
> +	return 0;
> +}
> +
> +static unsigned arm_extract_freq(u32 val, struct acpi_cpufreq_data *data)
> +{
> +	if (data->cpu_feature != SYSTEM_MEMORY_CAPABLE)
> +		return 0;
> +
> +	return arm_extract_io(val, data);
> +}
> +
> +static int arm_arch_feature(struct acpi_cpufreq_data *data)
> +{
> +	struct acpi_processor_performance *perf;
> +	int result = 0;
> +
> +	perf = data->acpi_data;
> +
> +	switch (perf->control_register.space_id) {
> +	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
> +		pr_debug("SYSTEM Memory addr space\n");
> +		data->cpu_feature = SYSTEM_MEMORY_CAPABLE;
> +		break;
> +	default:
> +		pr_debug("Unknown addr space %d\n",
> +				(u32) (perf->control_register.space_id));
> +		result = -ENODEV;
> +		break;
> +	}
> +
> +	return result;
> +}
> +
> +static struct acpi_cpufreq_ops arm_ops = {
> +	.arch_feature = arm_arch_feature,
> +	.extract_freq = arm_extract_freq,
> +	.drv_read = arm_do_drv_read,
> +	.drv_write = arm_do_drv_write,
> +};
> +
> +int arch_acpi_cpufreq_init(struct acpi_cpufreq_common *common)
> +{
> +	common->ops = &arm_ops;
> +
> +	return 0;
> +}
> +
> diff --git a/drivers/cpufreq/acpi-cpufreq_x86.c b/drivers/cpufreq/acpi-cpufreq_x86.c
> new file mode 100644
> index 0000000..82c9337
> --- /dev/null
> +++ b/drivers/cpufreq/acpi-cpufreq_x86.c
> @@ -0,0 +1,334 @@
> +#include <linux/module.h>
> +#include <linux/acpi.h>
> +#include <linux/cpu.h>
> +#include <linux/cpufreq.h>
> +#include <linux/io.h>
> +
> +#include <acpi/processor.h>
> +
> +#include <asm/msr.h>
> +#include <asm/cpufeature.h>
> +#include <asm/processor.h>
> +
> +#include "acpi-cpufreq.h"
> +
> +#define INTEL_MSR_RANGE		(0xffff)
> +#define AMD_MSR_RANGE		(0x7)
> +
> +#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
> +
> +static struct msr __percpu *msrs;
> +
> +static void x86_boost_set_msrs(bool enable, const struct cpumask *cpumask)
> +{
> +	u32 cpu;
> +	u32 msr_addr;
> +	u64 msr_mask;
> +
> +	switch (boot_cpu_data.x86_vendor) {
> +	case X86_VENDOR_INTEL:
> +		msr_addr = MSR_IA32_MISC_ENABLE;
> +		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
> +		break;
> +	case X86_VENDOR_AMD:
> +		msr_addr = MSR_K7_HWCR;
> +		msr_mask = MSR_K7_HWCR_CPB_DIS;
> +		break;
> +	default:
> +		return;
> +	}
> +
> +	rdmsr_on_cpus(cpumask, msr_addr, msrs);
> +
> +	for_each_cpu(cpu, cpumask) {
> +		struct msr *reg = per_cpu_ptr(msrs, cpu);
> +		if (enable)
> +			reg->q &= ~msr_mask;
> +		else
> +			reg->q |= msr_mask;
> +	}
> +
> +	wrmsr_on_cpus(cpumask, msr_addr, msrs);
> +}
> +
> +static bool boost_state(unsigned int cpu)
> +{
> +	u32 lo, hi;
> +	u64 msr;
> +
> +	switch (boot_cpu_data.x86_vendor) {
> +	case X86_VENDOR_INTEL:
> +		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
> +		msr = lo | ((u64)hi << 32);
> +		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
> +	case X86_VENDOR_AMD:
> +		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
> +		msr = lo | ((u64)hi << 32);
> +		return !(msr & MSR_K7_HWCR_CPB_DIS);
> +	}
> +	return false;
> +}
> +
> +#ifdef CONFIG_SMP
> +static int x86_arch_check(void *data)
> +{
> +	struct cpuinfo_x86 *c = data;
> +	/* Intel Xeon Processor 7100 Series Specification Update
> +	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
> +	 * AL30: A Machine Check Exception (MCE) Occurring during an
> +	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
> +	 * Both Processor Cores to Lock Up. */
> +	if (c->x86_vendor == X86_VENDOR_INTEL) {
> +		if ((c->x86 == 15) &&
> +		    (c->x86_model == 6) &&
> +		    (c->x86_mask == 8)) {
> +			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
> +			    "Xeon(R) 7100 Errata AL30, processors may "
> +			    "lock up on frequency changes: disabling "
> +			    "acpi-cpufreq.\n");
> +			return -ENODEV;
> +		}
> +	}
> +	return 0;
> +}
> +#else
> +#define x86_arch_check	NULL
> +#endif
> +
> +/* Called via smp_call_function_single(), on the target CPU */
> +static void x86_do_drv_read(void *_cmd, struct acpi_cpufreq_data *data)
> +{
> +	struct acpi_processor_performance *perf;
> +	struct drv_cmd *cmd = _cmd;
> +	u32 h;
> +
> +	switch (data->cpu_feature) {
> +	case SYSTEM_INTEL_MSR_CAPABLE:
> +		cmd->addr.msr.reg = MSR_IA32_PERF_CTL;
> +		rdmsr(cmd->addr.msr.reg, cmd->val, h);
> +		break;
> +	case SYSTEM_AMD_MSR_CAPABLE:
> +		cmd->addr.msr.reg = MSR_AMD_PERF_CTL;
> +		rdmsr(cmd->addr.msr.reg, cmd->val, h);
> +		break;
> +	case SYSTEM_IO_CAPABLE:
> +		perf = data->acpi_data;
> +		cmd->addr.io.port = perf->control_register.address;
> +		cmd->addr.io.bit_width = perf->control_register.bit_width;
> +		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
> +				&cmd->val,
> +				(u32)cmd->addr.io.bit_width);
> +		break;
> +	}
> +}
> +
> +/* Called via smp_call_function_many(), on the target CPUs */
> +static void x86_do_drv_write(void *_cmd, struct acpi_cpufreq_data *data)
> +{
> +	struct acpi_processor_performance *perf;
> +	struct drv_cmd *cmd = _cmd;
> +	u32 lo, hi;
> +
> +	perf = data->acpi_data;
> +
> +	switch (data->cpu_feature) {
> +	case SYSTEM_INTEL_MSR_CAPABLE:
> +		cmd->addr.msr.reg = MSR_IA32_PERF_CTL;
> +		rdmsr(cmd->addr.msr.reg, lo, hi);
> +		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
> +		wrmsr(cmd->addr.msr.reg, lo, hi);
> +		break;
> +	case SYSTEM_AMD_MSR_CAPABLE:
> +		cmd->addr.msr.reg = MSR_AMD_PERF_CTL;
> +		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
> +		break;
> +	case SYSTEM_IO_CAPABLE:
> +		cmd->addr.io.port = perf->control_register.address;
> +		cmd->addr.io.bit_width = perf->control_register.bit_width;
> +		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
> +				cmd->val,
> +				(u32)cmd->addr.io.bit_width);
> +		break;
> +	}
> +}
> +
> +static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
> +{
> +	struct acpi_processor_performance *perf;
> +	int i;
> +
> +	perf = data->acpi_data;
> +
> +	for (i = 0; i < perf->state_count; i++) {
> +		if (value == perf->states[i].status)
> +			return data->freq_table[i].frequency;
> +	}
> +	return 0;
> +}
> +
> +static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
> +{
> +	int i;
> +	struct acpi_processor_performance *perf;
> +
> +	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
> +		msr &= AMD_MSR_RANGE;
> +	else
> +		msr &= INTEL_MSR_RANGE;
> +
> +	perf = data->acpi_data;
> +
> +	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
> +		if (msr == perf->states[data->freq_table[i].driver_data].status)
> +			return data->freq_table[i].frequency;
> +	}
> +	return data->freq_table[0].frequency;
> +}
> +
> +static unsigned x86_extract_freq(u32 val, struct acpi_cpufreq_data *data)
> +{
> +	switch (data->cpu_feature) {
> +	case SYSTEM_INTEL_MSR_CAPABLE:
> +	case SYSTEM_AMD_MSR_CAPABLE:
> +		return extract_msr(val, data);
> +	case SYSTEM_IO_CAPABLE:
> +		return extract_io(val, data);
> +	default:
> +		return 0;
> +	}
> +}
> +
> +static int boost_notify(struct notifier_block *nb, unsigned long action,
> +		      void *hcpu)
> +{
> +	unsigned cpu = (long)hcpu;
> +	const struct cpumask *cpumask;
> +
> +	cpumask = get_cpu_mask(cpu);
> +
> +	/*
> +	 * Clear the boost-disable bit on the CPU_DOWN path so that
> +	 * this cpu cannot block the remaining ones from boosting. On
> +	 * the CPU_UP path we simply keep the boost-disable flag in
> +	 * sync with the current global state.
> +	 */
> +
> +	switch (action) {
> +	case CPU_UP_PREPARE:
> +	case CPU_UP_PREPARE_FROZEN:
> +		x86_boost_set_msrs(boost_state(0), cpumask);
> +		break;
> +
> +	case CPU_DOWN_PREPARE:
> +	case CPU_DOWN_PREPARE_FROZEN:
> +		x86_boost_set_msrs(1, cpumask);
> +		break;
> +
> +	default:
> +		break;
> +	}
> +
> +	return NOTIFY_OK;
> +}
> +
> +static struct notifier_block boost_nb = {
> +	.notifier_call          = boost_notify,
> +};
> +
> +static void x86_arch_boost_init(struct acpi_cpufreq_common *acpi)
> +{
> +	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
> +		msrs = msrs_alloc();
> +
> +		if (!msrs)
> +			return;
> +
> +		acpi->boost_supported = true;
> +
> +		get_online_cpus();
> +
> +		x86_boost_set_msrs(boost_state(0), cpu_online_mask);
> +
> +		register_cpu_notifier(&boost_nb);
> +
> +		put_online_cpus();
> +	}
> +}
> +
> +static void x86_arch_boost_exit(void)
> +{
> +	if (msrs) {
> +		unregister_cpu_notifier(&boost_nb);
> +
> +		msrs_free(msrs);
> +		msrs = NULL;
> +	}
> +}
> +
> +static int x86_arch_feature(struct acpi_cpufreq_data *data)
> +{
> +	struct acpi_processor_performance *perf;
> +	int result = 0;
> +
> +	perf = data->acpi_data;
> +
> +	switch (perf->control_register.space_id) {
> +	case ACPI_ADR_SPACE_SYSTEM_IO:
> +		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
> +		    boot_cpu_data.x86 == 0xf) {
> +			pr_debug("AMD K8 systems must use native drivers.\n");
> +			result = -ENODEV;
> +			break;
> +		}
> +		pr_debug("SYSTEM IO addr space\n");
> +		data->cpu_feature = SYSTEM_IO_CAPABLE;
> +		break;
> +	case ACPI_ADR_SPACE_FIXED_HARDWARE:
> +		pr_debug("HARDWARE addr space\n");
> +		if (boot_cpu_has(X86_FEATURE_EST)) {
> +			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
> +			break;
> +		}
> +		if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
> +			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
> +			break;
> +		}
> +		result = -ENODEV;
> +		break;
> +	default:
> +		pr_debug("Unknown addr space %d\n",
> +				(u32) (perf->control_register.space_id));
> +		result = -ENODEV;
> +		break;
> +	}
> +
> +	return result;
> +}
> +
> +static struct acpi_cpufreq_ops x86_ops = {
> +	.arch_check = x86_arch_check,
> +	.arch_feature = x86_arch_feature,
> +	.arch_boost_init = x86_arch_boost_init,
> +	.arch_boost_exit = x86_arch_boost_exit,
> +	.drv_read = x86_do_drv_read,
> +	.drv_write = x86_do_drv_write,
> +	.update_boost = x86_boost_set_msrs,
> +	.extract_freq = x86_extract_freq,
> +};
> +
> +int __init arch_acpi_cpufreq_init(struct acpi_cpufreq_common *acpi)
> +{
> +	acpi->ops = &x86_ops;
> +
> +	acpi->feature_hw_pstate = boot_cpu_has(X86_FEATURE_HW_PSTATE);
> +	acpi->const_loops = boot_cpu_has(X86_FEATURE_CONSTANT_TSC);
> +
> +	return 0;
> +}
> +
> +static const struct x86_cpu_id acpi_cpufreq_ids[] = {
> +	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
> +	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
> +	{}
> +};
> +MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
> diff --git a/include/linux/acpi.h b/include/linux/acpi.h index
> 85c60f3..9630ccc 100644
> --- a/include/linux/acpi.h
> +++ b/include/linux/acpi.h
> @@ -115,6 +115,7 @@ static inline void acpi_initrd_override(void *data,
> size_t size)
> 
>  char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
> void __acpi_unmap_table(char *map, unsigned long size);
> +int early_acpi_boot_init(void);
>  int acpi_boot_init (void);
>  void acpi_boot_table_init (void);
>  int acpi_mps_check (void);
> --
> 1.7.10.4