Separate acpi-cpufreq into three parts: acpi-cpufreq.c keeps the code common to both x86 and ARM, acpi-cpufreq_x86.c holds the x86-specific code, and acpi-cpufreq_arm.c holds the ARM-specific code.
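Each architecture now hooks into the common driver through a small ops table plus a one-time init call, both declared in the new acpi-cpufreq.h. In outline (the full definition is in the diff below):

    struct acpi_cpufreq_ops {
            int (*arch_check)(void *info);
            int (*arch_feature)(struct acpi_cpufreq_data *data);
            void (*arch_boost_init)(struct acpi_cpufreq_common *acpi);
            void (*arch_boost_exit)(void);
            void (*drv_read)(struct drv_cmd *cmd);
            void (*drv_write)(struct drv_cmd *cmd);
            void (*update_boost)(bool enable, const struct cpumask *cpumask);
            unsigned (*extract_freq)(u32 val, struct acpi_cpufreq_data *data);
    };

    int arch_acpi_cpufreq_init(struct acpi_cpufreq_common *common);

acpi_cpufreq_init() calls arch_acpi_cpufreq_init() once at module load and dispatches through common->ops from then on; hooks an architecture does not implement (for example the boost callbacks on ARM) stay NULL and are checked before each use.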
Signed-off-by: Jonghwan Choi <jhbird.choi@samsung.com>
---
 drivers/cpufreq/Makefile           |   6 +
 drivers/cpufreq/acpi-cpufreq.c     | 394 +++++------------------------------
 drivers/cpufreq/acpi-cpufreq.h     |  69 +++++++
 drivers/cpufreq/acpi-cpufreq_arm.c |  46 +++++
 drivers/cpufreq/acpi-cpufreq_x86.c | 300 +++++++++++++++++++++++++++
 5 files changed, 471 insertions(+), 344 deletions(-)
 create mode 100644 drivers/cpufreq/acpi-cpufreq.h
 create mode 100644 drivers/cpufreq/acpi-cpufreq_arm.c
 create mode 100644 drivers/cpufreq/acpi-cpufreq_x86.c
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index bfeb0fb..eda425a 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -14,6 +14,12 @@ obj-$(CONFIG_CPU_FREQ_GOV_COMMON)       += cpufreq_governor.o
obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)      += cpufreq-cpu0.o
obj-$(CONFIG_ACPI_CPUFREQ)              += acpi-cpufreq.o
+ifdef CONFIG_X86
+obj-$(CONFIG_ACPI_CPUFREQ)              += acpi-cpufreq_x86.o
+endif
+ifdef CONFIG_ARM64
+obj-$(CONFIG_ACPI_CPUFREQ)              += acpi-cpufreq_arm.o
+endif
##################################################################################
# x86 drivers.
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 80662b1..6f9749b 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -42,39 +42,17 @@
#include <acpi/processor.h>
-#ifdef CONFIG_X86
-#include <asm/msr.h>
-#endif
+#include <asm/cpu.h>
#include <asm/processor.h>
-#include <asm/cpufeature.h>
+
+#include "acpi-cpufreq.h"
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");
-#define PFX "acpi-cpufreq: "
-
-enum {
-        UNDEFINED_CAPABLE = 0,
-        SYSTEM_INTEL_MSR_CAPABLE,
-        SYSTEM_AMD_MSR_CAPABLE,
-        SYSTEM_IO_CAPABLE,
-};
-
-#define INTEL_MSR_RANGE         (0xffff)
-#define AMD_MSR_RANGE           (0x7)
-
-#define MSR_K7_HWCR_CPB_DIS     (1ULL << 25)
-
-struct acpi_cpufreq_data {
-        struct acpi_processor_performance *acpi_data;
-        struct cpufreq_frequency_table *freq_table;
-        unsigned int resume;
-        unsigned int cpu_feature;
-        cpumask_var_t freqdomain_cpus;
-};
-
static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
+static struct acpi_cpufreq_common *acpi;
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;
@@ -82,66 +60,12 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
static struct cpufreq_driver acpi_cpufreq_driver;
static unsigned int acpi_pstate_strict;
-static struct msr __percpu *msrs;
-
-static bool boost_state(unsigned int cpu)
-{
-#ifdef CONFIG_X86
-        u32 lo, hi;
-        u64 msr;
-
-        switch (boot_cpu_data.x86_vendor) {
-        case X86_VENDOR_INTEL:
-                rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
-                msr = lo | ((u64)hi << 32);
-                return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
-        case X86_VENDOR_AMD:
-                rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-                msr = lo | ((u64)hi << 32);
-                return !(msr & MSR_K7_HWCR_CPB_DIS);
-        }
-#endif
-        return false;
-}
-
-static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
-{
-        u32 cpu;
-        u32 msr_addr;
-        u64 msr_mask;
-
-#ifdef CONFIG_X86
-        switch (boot_cpu_data.x86_vendor) {
-        case X86_VENDOR_INTEL:
-                msr_addr = MSR_IA32_MISC_ENABLE;
-                msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
-                break;
-        case X86_VENDOR_AMD:
-                msr_addr = MSR_K7_HWCR;
-                msr_mask = MSR_K7_HWCR_CPB_DIS;
-                break;
-        default:
-                return;
-        }
-
-        rdmsr_on_cpus(cpumask, msr_addr, msrs);
-
-        for_each_cpu(cpu, cpumask) {
-                struct msr *reg = per_cpu_ptr(msrs, cpu);
-                if (enable)
-                        reg->q &= ~msr_mask;
-                else
-                        reg->q |= msr_mask;
-        }
-
-        wrmsr_on_cpus(cpumask, msr_addr, msrs);
-#endif
-}
static int _store_boost(int val)
{
        get_online_cpus();
-        boost_set_msrs(val, cpu_online_mask);
+        if (acpi->ops->update_boost)
+                acpi->ops->update_boost(val, cpu_online_mask);
        put_online_cpus();
        pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
@@ -157,13 +81,12 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
cpufreq_freq_attr_ro(freqdomain_cpus);
-#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_boost(const char *buf, size_t count)
{
        int ret;
        unsigned long val = 0;
-        if (!acpi_cpufreq_driver.boost_supported)
+        if (!acpi->boost_supported)
                return -EINVAL;
        ret = kstrtoul(buf, 10, &val);
@@ -187,25 +110,6 @@ static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
}
cpufreq_freq_attr_rw(cpb);
-#endif
-
-static int check_est_cpu(unsigned int cpuid)
-{
-#ifdef CONFIG_X86
-        struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
-
-        return cpu_has(cpu, X86_FEATURE_EST);
-#endif
-}
-
-static int check_amd_hwpstate_cpu(unsigned int cpuid)
-{
-#ifdef CONFIG_X86
-        struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
-
-        return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
-#endif
-}
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
@@ -221,75 +125,39 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
        return 0;
}
-static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
-{
-        int i;
-        struct acpi_processor_performance *perf;
-#ifdef CONFIG_X86
-        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-                msr &= AMD_MSR_RANGE;
-        else
-                msr &= INTEL_MSR_RANGE;
-#endif
-
-        perf = data->acpi_data;
-
-        for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
-                if (msr == perf->states[data->freq_table[i].driver_data].status)
-                        return data->freq_table[i].frequency;
-        }
-        return data->freq_table[0].frequency;
-}
static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
        switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
        case SYSTEM_AMD_MSR_CAPABLE:
-                return extract_msr(val, data);
+                return acpi->ops->extract_freq(val, data);
        case SYSTEM_IO_CAPABLE:
+        case SYSTEM_MEMORY_CAPABLE:
                return extract_io(val, data);
        default:
                return 0;
        }
}
-struct msr_addr {
-        u32 reg;
-};
-
-struct io_addr {
-        u16 port;
-        u8 bit_width;
-};
-
-struct drv_cmd {
-        unsigned int type;
-        const struct cpumask *mask;
-        union {
-                struct msr_addr msr;
-                struct io_addr io;
-        } addr;
-        u32 val;
-};
-
/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
        struct drv_cmd *cmd = _cmd;
-        u32 h;
        switch (cmd->type) {
        case SYSTEM_INTEL_MSR_CAPABLE:
        case SYSTEM_AMD_MSR_CAPABLE:
-#ifdef CONFIG_X86
-                rdmsr(cmd->addr.msr.reg, cmd->val, h);
-#endif
+                acpi->ops->drv_read(cmd);
                break;
        case SYSTEM_IO_CAPABLE:
                acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
                                &cmd->val,
                                (u32)cmd->addr.io.bit_width);
+                break;
+        case SYSTEM_MEMORY_CAPABLE:
+                acpi_os_read_memory((acpi_physical_address)cmd->addr.mem.addr,
+                                (u64 *)&cmd->val,
+                                cmd->addr.mem.bit_width);
                break;
        default:
                break;
@@ -300,25 +168,20 @@ static void do_drv_read(void *_cmd)
static void do_drv_write(void *_cmd)
{
        struct drv_cmd *cmd = _cmd;
-        u32 lo, hi;
        switch (cmd->type) {
        case SYSTEM_INTEL_MSR_CAPABLE:
-#ifdef CONFIG_X86
-                rdmsr(cmd->addr.msr.reg, lo, hi);
-                lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
-                wrmsr(cmd->addr.msr.reg, lo, hi);
-#endif
-                break;
        case SYSTEM_AMD_MSR_CAPABLE:
-#ifdef CONFIG_X86
-                wrmsr(cmd->addr.msr.reg, cmd->val, 0);
-#endif
+                acpi->ops->drv_write(cmd);
                break;
        case SYSTEM_IO_CAPABLE:
                acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
                                cmd->val,
                                (u32)cmd->addr.io.bit_width);
+                break;
+        case SYSTEM_MEMORY_CAPABLE:
+                acpi_os_write_memory((acpi_physical_address)cmd->addr.mem.addr,
+                                (u64)cmd->val,
+                                cmd->addr.mem.bit_width);
                break;
        default:
                break;
@@ -347,34 +210,13 @@ static void drv_write(struct drv_cmd *cmd)
static u32 get_cur_val(const struct cpumask *mask)
{
        struct acpi_processor_performance *perf;
        struct drv_cmd cmd;
        if (unlikely(cpumask_empty(mask)))
                return 0;
-        switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
-#ifdef CONFIG_X86
-        case SYSTEM_INTEL_MSR_CAPABLE:
-                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
-                cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
-                break;
-        case SYSTEM_AMD_MSR_CAPABLE:
-                cmd.type = SYSTEM_AMD_MSR_CAPABLE;
-                cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
-                break;
-#endif
-        case SYSTEM_IO_CAPABLE:
-                cmd.type = SYSTEM_IO_CAPABLE;
-                perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
-                cmd.addr.io.port = perf->control_register.address;
-                cmd.addr.io.bit_width = perf->control_register.bit_width;
-                break;
-        default:
-                return 0;
-        }
-
+        /*
+         * The arch drv_read op picks the MSR itself, but cmd.type must
+         * still be set so do_drv_read() can dispatch, and the IO/memory
+         * control register address still comes from the ACPI data.
+         */
+        cmd.type = per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature;
+        perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
+        switch (cmd.type) {
+        case SYSTEM_IO_CAPABLE:
+                cmd.addr.io.port = perf->control_register.address;
+                cmd.addr.io.bit_width = perf->control_register.bit_width;
+                break;
+        case SYSTEM_MEMORY_CAPABLE:
+                cmd.addr.mem.addr = perf->control_register.address;
+                cmd.addr.mem.bit_width = perf->control_register.bit_width;
+                break;
+        }
        cmd.mask = mask;
+
        drv_read(&cmd);
pr_debug("get_cur_val = %u\n", cmd.val); @@ -453,36 +295,14 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, } }
-        switch (data->cpu_feature) {
-#ifdef CONFIG_X86
-        case SYSTEM_INTEL_MSR_CAPABLE:
-                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
-                cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
-                cmd.val = (u32) perf->states[next_perf_state].control;
-                break;
-        case SYSTEM_AMD_MSR_CAPABLE:
-                cmd.type = SYSTEM_AMD_MSR_CAPABLE;
-                cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
-                cmd.val = (u32) perf->states[next_perf_state].control;
-                break;
-#endif
-        case SYSTEM_IO_CAPABLE:
-                cmd.type = SYSTEM_IO_CAPABLE;
-                cmd.addr.io.port = perf->control_register.address;
-                cmd.addr.io.bit_width = perf->control_register.bit_width;
-                cmd.val = (u32) perf->states[next_perf_state].control;
-                break;
-        default:
-                result = -ENODEV;
-                goto out;
-        }
-
        /* cpufreq holds the hotplug lock, so we are safe from here on */
        if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
                cmd.mask = policy->cpus;
        else
                cmd.mask = cpumask_of(policy->cpu);
+        /* as in get_cur_val(): type drives dispatch, IO/memory address
+         * comes from the ACPI data, the MSR is chosen by the arch op */
+        cmd.type = data->cpu_feature;
+        switch (cmd.type) {
+        case SYSTEM_IO_CAPABLE:
+                cmd.addr.io.port = perf->control_register.address;
+                cmd.addr.io.bit_width = perf->control_register.bit_width;
+                break;
+        case SYSTEM_MEMORY_CAPABLE:
+                cmd.addr.mem.addr = perf->control_register.address;
+                cmd.addr.mem.bit_width = perf->control_register.bit_width;
+                break;
+        }
+        cmd.val = (u32) perf->states[next_perf_state].control;
+
        drv_write(&cmd);
        if (acpi_pstate_strict) {
@@ -540,44 +360,6 @@ static void free_acpi_perf_data(void)
        free_percpu(acpi_perf_data);
}
-static int boost_notify(struct notifier_block *nb, unsigned long action,
-                      void *hcpu)
-{
-        unsigned cpu = (long)hcpu;
-        const struct cpumask *cpumask;
-
-        cpumask = get_cpu_mask(cpu);
-
-        /*
-         * Clear the boost-disable bit on the CPU_DOWN path so that
-         * this cpu cannot block the remaining ones from boosting. On
-         * the CPU_UP path we simply keep the boost-disable flag in
-         * sync with the current global state.
-         */
-
-        switch (action) {
-        case CPU_UP_PREPARE:
-        case CPU_UP_PREPARE_FROZEN:
-                boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
-                break;
-
-        case CPU_DOWN_PREPARE:
-        case CPU_DOWN_PREPARE_FROZEN:
-                boost_set_msrs(1, cpumask);
-                break;
-
-        default:
-                break;
-        }
-
-        return NOTIFY_OK;
-}
-
-
-static struct notifier_block boost_nb = {
-        .notifier_call = boost_notify,
-};
-
/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
@@ -639,29 +421,6 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
        },
        { }
};
-
-static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
-{
-        /* Intel Xeon Processor 7100 Series Specification Update
-         * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
-         * AL30: A Machine Check Exception (MCE) Occurring during an
-         * Enhanced Intel SpeedStep Technology Ratio Change May Cause
-         * Both Processor Cores to Lock Up. */
-#ifdef CONFIG_X86
-        if (c->x86_vendor == X86_VENDOR_INTEL) {
-                if ((c->x86 == 15) &&
-                    (c->x86_model == 6) &&
-                    (c->x86_mask == 8)) {
-                        printk(KERN_INFO "acpi-cpufreq: Intel(R) "
-                            "Xeon(R) 7100 Errata AL30, processors may "
-                            "lock up on frequency changes: disabling "
-                            "acpi-cpufreq.\n");
-                        return -ENODEV;
-                }
-        }
-#endif
-        return 0;
-}
#endif
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -671,11 +430,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        unsigned int cpu = policy->cpu;
        struct acpi_cpufreq_data *data;
        unsigned int result = 0;
-#ifdef CONFIG_X86
-        struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
-#else
-        struct cpuinfo_x86 *c = NULL;
-#endif
+        void *c = &cpu_data(policy->cpu);
        struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
        static int blacklisted;
@@ -686,9 +441,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
#ifdef CONFIG_SMP
        if (blacklisted)
                return blacklisted;
-        blacklisted = acpi_cpufreq_blacklist(c);
-        if (blacklisted)
-                return blacklisted;
+        if (acpi->ops->arch_check) {
+                blacklisted = acpi->ops->arch_check(c);
+                if (blacklisted)
+                        return blacklisted;
+        }
#endif
        data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -703,10 +460,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
        per_cpu(acfreq_data, cpu) = data;
-#ifdef CONFIG_X86
-        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
+        if (acpi->const_loops)
                acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
-#endif
        result = acpi_processor_register_performance(data->acpi_data, cpu);
        if (result)
@@ -734,7 +489,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
#endif
        }
-        if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
+        if (acpi->feature_hw_pstate && !acpi_pstate_strict) {
                cpumask_clear(policy->cpus);
                cpumask_set_cpu(cpu, policy->cpus);
#ifdef CONFIG_X86
@@ -757,34 +512,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                goto err_unreg;
        }
-        switch (perf->control_register.space_id) {
-        case ACPI_ADR_SPACE_SYSTEM_IO:
-#ifdef CONFIG_X86
-                if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-                    boot_cpu_data.x86 == 0xf) {
-                        pr_debug("AMD K8 systems must use native drivers.\n");
-                        result = -ENODEV;
-                        goto err_unreg;
-                }
-#endif
-                pr_debug("SYSTEM IO addr space\n");
-                data->cpu_feature = SYSTEM_IO_CAPABLE;
-                break;
-        case ACPI_ADR_SPACE_FIXED_HARDWARE:
-                pr_debug("HARDWARE addr space\n");
-                if (check_est_cpu(cpu)) {
-                        data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
-                        break;
-                }
-                if (check_amd_hwpstate_cpu(cpu)) {
-                        data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
-                        break;
-                }
-                result = -ENODEV;
-                goto err_unreg;
-        default:
-                pr_debug("Unknown addr space %d\n",
-                         (u32) (perf->control_register.space_id));
+        if (acpi->ops->arch_feature(data)) {
                result = -ENODEV;
                goto err_unreg;
        }
@@ -933,39 +661,14 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
static void __init acpi_cpufreq_boost_init(void)
{
-#ifdef CONFIG_X86
-        if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
-                msrs = msrs_alloc();
-
-                if (!msrs)
-                        return;
-
-                acpi_cpufreq_driver.boost_supported = true;
-                acpi_cpufreq_driver.boost_enabled = boost_state(0);
-
-                cpu_notifier_register_begin();
-
-                /* Force all MSRs to the same value */
-                boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
-                               cpu_online_mask);
-
-                __register_cpu_notifier(&boost_nb);
-
-                cpu_notifier_register_done();
-        }
-#endif
+        if (acpi->ops->arch_boost_init)
+                acpi->ops->arch_boost_init(acpi);
}
static void acpi_cpufreq_boost_exit(void)
{
-#ifdef CONFIG_X86
-        if (msrs) {
-                unregister_cpu_notifier(&boost_nb);
-
-                msrs_free(msrs);
-                msrs = NULL;
-        }
-#endif
+        if (acpi->ops->arch_boost_exit)
+                acpi->ops->arch_boost_exit();
}
static int __init acpi_cpufreq_init(void)
@@ -985,14 +688,23 @@ static int __init acpi_cpufreq_init(void)
        if (ret)
                return ret;
-#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+        acpi = kzalloc(sizeof(*acpi), GFP_KERNEL);
+        if (!acpi)
+                return -ENOMEM;
+
+        ret = arch_acpi_cpufreq_init(acpi);
+        if (ret) {
+                kfree(acpi);
+                return ret;
+        }
+
        /* this is a sysfs file with a strange name and an even stranger
         * semantic - per CPU instantiation, but system global effect.
         * Lets enable it only on AMD CPUs for compatibility reasons and
         * only if configured. This is considered legacy code, which
         * will probably be removed at some point in the future.
         */
-        if (check_amd_hwpstate_cpu(0)) {
+        if (acpi->feature_hw_pstate) {
                struct freq_attr **iter;
pr_debug("adding sysfs entry for cpb\n"); @@ -1004,11 +716,12 @@ static int __init acpi_cpufreq_init(void) if (iter[1] == NULL) *iter = &cpb; } -#endif + acpi_cpufreq_boost_init();
        ret = cpufreq_register_driver(&acpi_cpufreq_driver);
        if (ret) {
                free_acpi_perf_data();
                acpi_cpufreq_boost_exit();
+                /* free last: acpi_cpufreq_boost_exit() still uses acpi->ops */
+                kfree(acpi);
        }
@@ -1024,6 +737,8 @@ static void __exit acpi_cpufreq_exit(void)
        cpufreq_unregister_driver(&acpi_cpufreq_driver);
        free_acpi_perf_data();
+
+        kfree(acpi);
}
module_param(acpi_pstate_strict, uint, 0644);
@@ -1034,15 +749,6 @@ MODULE_PARM_DESC(acpi_pstate_strict,
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
-#ifdef CONFIG_X86
-static const struct x86_cpu_id acpi_cpufreq_ids[] = {
-        X86_FEATURE_MATCH(X86_FEATURE_ACPI),
-        X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
-        {}
-};
-MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
-#endif
-
static const struct acpi_device_id processor_device_ids[] = {
        {ACPI_PROCESSOR_OBJECT_HID, },
        {ACPI_PROCESSOR_DEVICE_HID, },
diff --git a/drivers/cpufreq/acpi-cpufreq.h b/drivers/cpufreq/acpi-cpufreq.h
new file mode 100644
index 0000000..ddef14f
--- /dev/null
+++ b/drivers/cpufreq/acpi-cpufreq.h
@@ -0,0 +1,69 @@
+#ifndef _ACPI_CPUFREQ_H
+#define _ACPI_CPUFREQ_H
+
+#define PFX "acpi-cpufreq: "
+
+enum {
+        UNDEFINED_CAPABLE = 0,
+        SYSTEM_INTEL_MSR_CAPABLE,
+        SYSTEM_AMD_MSR_CAPABLE,
+        SYSTEM_IO_CAPABLE,
+        SYSTEM_MEMORY_CAPABLE,
+};
+
+struct msr_addr {
+        u32 reg;
+};
+
+struct io_addr {
+        u16 port;
+        u32 bit_width;
+};
+
+struct mem_addr {
+        u64 addr;
+        u32 bit_width;
+};
+
+struct drv_cmd {
+        unsigned int type;
+        const struct cpumask *mask;
+        union {
+                struct msr_addr msr;
+                struct io_addr io;
+                struct mem_addr mem;
+        } addr;
+        u32 val;
+};
+
+struct acpi_cpufreq_data;
+struct acpi_cpufreq_common;
+
+struct acpi_cpufreq_ops {
+        int (*arch_check)(void *info);
+        int (*arch_feature)(struct acpi_cpufreq_data *data);
+        void (*arch_boost_init)(struct acpi_cpufreq_common *acpi);
+        void (*arch_boost_exit)(void);
+        void (*drv_read)(struct drv_cmd *cmd);
+        void (*drv_write)(struct drv_cmd *cmd);
+        void (*update_boost)(bool enable, const struct cpumask *cpumask);
+        unsigned (*extract_freq)(u32 val, struct acpi_cpufreq_data *data);
+};
+
+struct acpi_cpufreq_data {
+        struct acpi_processor_performance *acpi_data;
+        struct cpufreq_frequency_table *freq_table;
+        unsigned int resume;
+        unsigned int cpu_feature;
+        cpumask_var_t freqdomain_cpus;
+};
+
+struct acpi_cpufreq_common {
+        struct acpi_cpufreq_ops *ops;
+        bool boost_supported;
+        bool feature_hw_pstate;
+        bool const_loops;
+};
+
+int arch_acpi_cpufreq_init(struct acpi_cpufreq_common *common);
+#endif /* _ACPI_CPUFREQ_H */
diff --git a/drivers/cpufreq/acpi-cpufreq_arm.c b/drivers/cpufreq/acpi-cpufreq_arm.c
new file mode 100644
index 0000000..60ec8aa
--- /dev/null
+++ b/drivers/cpufreq/acpi-cpufreq_arm.c
@@ -0,0 +1,46 @@
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include <acpi/processor.h>
+
+#include <asm/cpu.h>
+#include <asm/processor.h>
+
+#include "acpi-cpufreq.h"
+
+static int arm_arch_feature(struct acpi_cpufreq_data *data)
+{
+        struct acpi_processor_performance *perf;
+        int result = 0;
+
+        perf = data->acpi_data;
+
+        switch (perf->control_register.space_id) {
+        case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+                pr_debug("SYSTEM Memory addr space\n");
+                data->cpu_feature = SYSTEM_MEMORY_CAPABLE;
+                break;
+        default:
+                pr_debug("Unknown addr space %d\n",
+                         (u32) (perf->control_register.space_id));
+                result = -ENODEV;
+                break;
+        }
+
+        return result;
+}
+
+static struct acpi_cpufreq_ops arm_ops = {
+        .arch_feature = arm_arch_feature,
+};
+
+int arch_acpi_cpufreq_init(struct acpi_cpufreq_common *common)
+{
+        common->ops = &arm_ops;
+
+        return 0;
+}
diff --git a/drivers/cpufreq/acpi-cpufreq_x86.c b/drivers/cpufreq/acpi-cpufreq_x86.c
new file mode 100644
index 0000000..5fe36e4
--- /dev/null
+++ b/drivers/cpufreq/acpi-cpufreq_x86.c
@@ -0,0 +1,300 @@
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/io.h>
+
+#include <acpi/processor.h>
+
+#include <asm/msr.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+
+#include "acpi-cpufreq.h"
+
+#define INTEL_MSR_RANGE         (0xffff)
+#define AMD_MSR_RANGE           (0x7)
+
+#define MSR_K7_HWCR_CPB_DIS     (1ULL << 25)
+
+static struct msr __percpu *msrs;
+
+static void x86_boost_set_msrs(bool enable, const struct cpumask *cpumask)
+{
+        u32 cpu;
+        u32 msr_addr;
+        u64 msr_mask;
+
+        switch (boot_cpu_data.x86_vendor) {
+        case X86_VENDOR_INTEL:
+                msr_addr = MSR_IA32_MISC_ENABLE;
+                msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+                break;
+        case X86_VENDOR_AMD:
+                msr_addr = MSR_K7_HWCR;
+                msr_mask = MSR_K7_HWCR_CPB_DIS;
+                break;
+        default:
+                return;
+        }
+
+        rdmsr_on_cpus(cpumask, msr_addr, msrs);
+
+        for_each_cpu(cpu, cpumask) {
+                struct msr *reg = per_cpu_ptr(msrs, cpu);
+                if (enable)
+                        reg->q &= ~msr_mask;
+                else
+                        reg->q |= msr_mask;
+        }
+
+        wrmsr_on_cpus(cpumask, msr_addr, msrs);
+}
+
+static bool boost_state(unsigned int cpu)
+{
+        u32 lo, hi;
+        u64 msr;
+
+        switch (boot_cpu_data.x86_vendor) {
+        case X86_VENDOR_INTEL:
+                rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
+                msr = lo | ((u64)hi << 32);
+                return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+        case X86_VENDOR_AMD:
+                rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+                msr = lo | ((u64)hi << 32);
+                return !(msr & MSR_K7_HWCR_CPB_DIS);
+        }
+        return false;
+}
+
+#ifdef CONFIG_SMP
+static int x86_arch_check(void *data)
+{
+        struct cpuinfo_x86 *c = data;
+
+        /* Intel Xeon Processor 7100 Series Specification Update
+         * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+         * AL30: A Machine Check Exception (MCE) Occurring during an
+         * Enhanced Intel SpeedStep Technology Ratio Change May Cause
+         * Both Processor Cores to Lock Up. */
+        if (c->x86_vendor == X86_VENDOR_INTEL) {
+                if ((c->x86 == 15) &&
+                    (c->x86_model == 6) &&
+                    (c->x86_mask == 8)) {
+                        printk(KERN_INFO "acpi-cpufreq: Intel(R) "
+                            "Xeon(R) 7100 Errata AL30, processors may "
+                            "lock up on frequency changes: disabling "
+                            "acpi-cpufreq.\n");
+                        return -ENODEV;
+                }
+        }
+        return 0;
+}
+#else
+#define x86_arch_check NULL
+#endif
+
+static void x86_do_drv_read(struct drv_cmd *cmd)
+{
+        u32 h;
+
+        switch (cmd->type) {
+        case SYSTEM_INTEL_MSR_CAPABLE:
+                cmd->addr.msr.reg = MSR_IA32_PERF_CTL;
+                rdmsr(cmd->addr.msr.reg, cmd->val, h);
+                break;
+        case SYSTEM_AMD_MSR_CAPABLE:
+                cmd->addr.msr.reg = MSR_AMD_PERF_CTL;
+                rdmsr(cmd->addr.msr.reg, cmd->val, h);
+                break;
+        default:
+                break;
+        }
+}
+
+/* Called via smp_call_function_many(), on the target CPUs */
+static void x86_do_drv_write(struct drv_cmd *cmd)
+{
+        u32 lo, hi;
+
+        switch (cmd->type) {
+        case SYSTEM_INTEL_MSR_CAPABLE:
+                cmd->addr.msr.reg = MSR_IA32_PERF_CTL;
+                rdmsr(cmd->addr.msr.reg, lo, hi);
+                lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
+                wrmsr(cmd->addr.msr.reg, lo, hi);
+                break;
+        case SYSTEM_AMD_MSR_CAPABLE:
+                cmd->addr.msr.reg = MSR_AMD_PERF_CTL;
+                wrmsr(cmd->addr.msr.reg, cmd->val, 0);
+                break;
+        default:
+                break;
+        }
+}
+
+static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
+{
+        int i;
+        struct acpi_processor_performance *perf;
+
+        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+                msr &= AMD_MSR_RANGE;
+        else
+                msr &= INTEL_MSR_RANGE;
+
+        perf = data->acpi_data;
+
+        for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+                if (msr == perf->states[data->freq_table[i].driver_data].status)
+                        return data->freq_table[i].frequency;
+        }
+        return data->freq_table[0].frequency;
+}
+
+static unsigned x86_extract_freq(u32 val, struct acpi_cpufreq_data *data)
+{
+        switch (data->cpu_feature) {
+        case SYSTEM_INTEL_MSR_CAPABLE:
+        case SYSTEM_AMD_MSR_CAPABLE:
+                return extract_msr(val, data);
+        default:
+                return 0;
+        }
+}
+
+static int boost_notify(struct notifier_block *nb, unsigned long action,
+                      void *hcpu)
+{
+        unsigned cpu = (long)hcpu;
+        const struct cpumask *cpumask;
+
+        cpumask = get_cpu_mask(cpu);
+
+        /*
+         * Clear the boost-disable bit on the CPU_DOWN path so that
+         * this cpu cannot block the remaining ones from boosting. On
+         * the CPU_UP path we simply keep the boost-disable flag in
+         * sync with the current global state.
+         */
+
+        switch (action) {
+        case CPU_UP_PREPARE:
+        case CPU_UP_PREPARE_FROZEN:
+                x86_boost_set_msrs(boost_state(0), cpumask);
+                break;
+
+        case CPU_DOWN_PREPARE:
+        case CPU_DOWN_PREPARE_FROZEN:
+                x86_boost_set_msrs(1, cpumask);
+                break;
+
+        default:
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block boost_nb = {
+        .notifier_call = boost_notify,
+};
+
+static void x86_arch_boost_init(struct acpi_cpufreq_common *acpi)
+{
+        if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
+                msrs = msrs_alloc();
+
+                if (!msrs)
+                        return;
+
+                acpi->boost_supported = true;
+
+                cpu_notifier_register_begin();
+
+                /* Force all MSRs to the same value */
+                x86_boost_set_msrs(boost_state(0), cpu_online_mask);
+
+                __register_cpu_notifier(&boost_nb);
+
+                cpu_notifier_register_done();
+        }
+}
+
+static void x86_arch_boost_exit(void)
+{
+        if (msrs) {
+                unregister_cpu_notifier(&boost_nb);
+
+                msrs_free(msrs);
+                msrs = NULL;
+        }
+}
+
+static int x86_arch_feature(struct acpi_cpufreq_data *data)
+{
+        struct acpi_processor_performance *perf;
+        int result = 0;
+
+        perf = data->acpi_data;
+
+        switch (perf->control_register.space_id) {
+        case ACPI_ADR_SPACE_SYSTEM_IO:
+                if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+                    boot_cpu_data.x86 == 0xf) {
+                        pr_debug("AMD K8 systems must use native drivers.\n");
+                        result = -ENODEV;
+                        break;
+                }
+                pr_debug("SYSTEM IO addr space\n");
+                data->cpu_feature = SYSTEM_IO_CAPABLE;
+                break;
+        case ACPI_ADR_SPACE_FIXED_HARDWARE:
+                pr_debug("HARDWARE addr space\n");
+                if (boot_cpu_has(X86_FEATURE_EST)) {
+                        data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+                        break;
+                }
+                if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
+                        data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+                        break;
+                }
+                result = -ENODEV;
+                break;
+        default:
+                pr_debug("Unknown addr space %d\n",
+                         (u32) (perf->control_register.space_id));
+                result = -ENODEV;
+                break;
+        }
+
+        return result;
+}
+
+static struct acpi_cpufreq_ops x86_ops = {
+        .arch_check = x86_arch_check,
+        .arch_feature = x86_arch_feature,
+        .arch_boost_init = x86_arch_boost_init,
+        .arch_boost_exit = x86_arch_boost_exit,
+        .drv_read = x86_do_drv_read,
+        .drv_write = x86_do_drv_write,
+        .update_boost = x86_boost_set_msrs,
+        .extract_freq = x86_extract_freq,
+};
+
+int __init arch_acpi_cpufreq_init(struct acpi_cpufreq_common *acpi)
+{
+        acpi->ops = &x86_ops;
+
+        acpi->feature_hw_pstate = boot_cpu_has(X86_FEATURE_HW_PSTATE);
+        acpi->const_loops = boot_cpu_has(X86_FEATURE_CONSTANT_TSC);
+
+        return 0;
+}
+
+static const struct x86_cpu_id acpi_cpufreq_ids[] = {
+        X86_FEATURE_MATCH(X86_FEATURE_ACPI),
+        X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
+        {}
+};
+MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);