To allow compilation of the ACPI and cpufreq drivers on ARM, use some hacks for now. Later, these hacks will be replaced with proper solutions.
Signed-off-by: Jonghwan Choi <jhbird.choi@samsung.com>
---
 drivers/acpi/acpi_processor.c  |  4 +++
 drivers/acpi/processor_idle.c  | 14 +++++++++++
 drivers/cpufreq/acpi-cpufreq.c | 53 +++++++++++++++++++++++++++++++++++-----
 3 files changed, 65 insertions(+), 6 deletions(-)
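Note: every hunk below applies the same compile-out pattern. As a minimal
sketch (illustrative only, not part of the diff), the shape is an #ifdef
CONFIG_X86 around the x86-only code; the #else branch here is an assumption
showing how a non-void helper such as disabled_by_idle_boot_param() can keep
a defined return value when the x86 branch is compiled out:

	/* Sketch of the compile-out pattern; the #else fallback is an
	 * assumption, not taken from the diff below. */
	static int disabled_by_idle_boot_param(void)
	{
	#ifdef CONFIG_X86
		/* boot_option_idle_override is x86-only */
		return boot_option_idle_override == IDLE_POLL ||
		       boot_option_idle_override == IDLE_HALT;
	#else
		return 0;	/* no such boot options elsewhere */
	#endif
	}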
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index c29c2c3..6a89856 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -177,6 +177,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 	cpu_maps_update_begin();
 	cpu_hotplug_begin();
+#ifdef CONFIG_X86
 	ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
 	if (ret)
 		goto out;
@@ -186,6 +187,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 		acpi_unmap_lsapic(pr->id);
 		goto out;
 	}
+#endif
 	/*
 	 * CPU got hot-added, but cpu_data is not initialized yet. Set a flag
@@ -457,8 +459,10 @@ static void acpi_processor_remove(struct acpi_device *device)
 	cpu_hotplug_begin();
 	/* Remove the CPU. */
+#ifdef CONFIG_X86
 	arch_unregister_cpu(pr->id);
 	acpi_unmap_lsapic(pr->id);
+#endif
 	cpu_hotplug_done();
 	cpu_maps_update_done();

diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3dca36d..fae22f4 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -70,8 +70,10 @@ static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
 static int disabled_by_idle_boot_param(void)
 {
+#ifdef CONFIG_X86
 	return boot_option_idle_override == IDLE_POLL ||
 	       boot_option_idle_override == IDLE_HALT;
+#endif
 }
 /*
@@ -118,7 +120,9 @@ static struct dmi_system_id processor_power_dmi_table[] = {
 static void acpi_safe_halt(void)
 {
 	if (!tif_need_resched()) {
+#ifdef CONFIG_X86
 		safe_halt();
+#endif
 		local_irq_disable();
 	}
 }
@@ -433,6 +437,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 			} else {
 				continue;
 			}
+#ifdef CONFIG_X86
 			if (cx.type == ACPI_STATE_C1 &&
 			    (boot_option_idle_override == IDLE_NOMWAIT)) {
 				/*
@@ -448,6 +453,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 				cx.entry_method = ACPI_CSTATE_HALT;
 				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
 			}
+#endif
 		} else {
 			snprintf(cx.desc, ACPI_CX_DESC_LEN,
 				 "ACPI IOPORT 0x%x", cx.address);
@@ -748,7 +754,11 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 	while (1) {
 		if (cx->entry_method == ACPI_CSTATE_HALT)
+#ifdef CONFIG_X86
 			safe_halt();
+#else
+			;
+#endif
 		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
 			inb(cx->address);
 			/* See comment in acpi_idle_do_entry() */
@@ -843,7 +853,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		}
 	}
+#ifdef CONFIG_X86
 	acpi_unlazy_tlb(smp_processor_id());
+#endif
 	/* Tell the scheduler that we are going deep-idle: */
 	sched_clock_idle_sleep_event();
@@ -1121,7 +1133,9 @@ int acpi_processor_power_init(struct acpi_processor *pr)
 	if (!first_run) {
 		dmi_check_system(processor_power_dmi_table);
+#ifdef CONFIG_X86
 		max_cstate = acpi_processor_cstate_check(max_cstate);
+#endif
 		if (max_cstate < ACPI_C_STATES_MAX)
 			printk(KERN_NOTICE
 			       "ACPI: processor limited to max C-state %d\n",

diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 000e4e0..80662b1 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -42,7 +42,9 @@
 #include <acpi/processor.h>
+#ifdef CONFIG_X86
 #include <asm/msr.h>
+#endif
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
@@ -84,6 +86,7 @@ static struct msr __percpu *msrs;
 static bool boost_state(unsigned int cpu)
 {
+#ifdef CONFIG_X86
 	u32 lo, hi;
 	u64 msr;
@@ -97,6 +100,7 @@ static bool boost_state(unsigned int cpu)
 		msr = lo | ((u64)hi << 32);
 		return !(msr & MSR_K7_HWCR_CPB_DIS);
 	}
+#endif
 	return false;
 }
@@ -106,6 +110,7 @@ static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
 	u32 msr_addr;
 	u64 msr_mask;
+#ifdef CONFIG_X86
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_INTEL:
 		msr_addr = MSR_IA32_MISC_ENABLE;
@@ -130,6 +135,7 @@ static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
 	}
 	wrmsr_on_cpus(cpumask, msr_addr, msrs);
+#endif
 }
 static int _store_boost(int val)
@@ -185,16 +191,20 @@ cpufreq_freq_attr_rw(cpb);
 static int check_est_cpu(unsigned int cpuid)
 {
+#ifdef CONFIG_X86
 	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
 	return cpu_has(cpu, X86_FEATURE_EST);
+#endif
 }
 static int check_amd_hwpstate_cpu(unsigned int cpuid)
 {
+#ifdef CONFIG_X86
 	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
 	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
+#endif
 }
 static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
@@ -215,11 +225,12 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
 {
 	int i;
 	struct acpi_processor_performance *perf;
-
+#ifdef CONFIG_X86
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 		msr &= AMD_MSR_RANGE;
 	else
 		msr &= INTEL_MSR_RANGE;
+#endif
 	perf = data->acpi_data;
@@ -271,7 +282,9 @@ static void do_drv_read(void *_cmd)
 	switch (cmd->type) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 	case SYSTEM_AMD_MSR_CAPABLE:
+#ifdef CONFIG_X86
 		rdmsr(cmd->addr.msr.reg, cmd->val, h);
+#endif
 		break;
 	case SYSTEM_IO_CAPABLE:
 		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
@@ -291,12 +304,16 @@ static void do_drv_write(void *_cmd)
 	switch (cmd->type) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
+#ifdef CONFIG_X86
 		rdmsr(cmd->addr.msr.reg, lo, hi);
 		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
 		wrmsr(cmd->addr.msr.reg, lo, hi);
+#endif
 		break;
 	case SYSTEM_AMD_MSR_CAPABLE:
+#ifdef CONFIG_X86
 		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
+#endif
 		break;
 	case SYSTEM_IO_CAPABLE:
 		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
@@ -337,6 +354,7 @@ static u32 get_cur_val(const struct cpumask *mask)
 		return 0;
 	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
+#ifdef CONFIG_X86
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
@@ -345,6 +363,7 @@ static u32 get_cur_val(const struct cpumask *mask)
 		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
 		break;
+#endif
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
 		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
@@ -435,6 +454,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	}
 	switch (data->cpu_feature) {
+#ifdef CONFIG_X86
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
@@ -445,6 +465,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
 		cmd.val = (u32) perf->states[next_perf_state].control;
 		break;
+#endif
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
 		cmd.addr.io.port = perf->control_register.address;
@@ -484,7 +505,7 @@ static unsigned long
 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
 	struct acpi_processor_performance *perf = data->acpi_data;
-
+#ifdef CONFIG_X86
 	if (cpu_khz) {
 		/* search the closest match to cpu_khz */
 		unsigned int i;
@@ -501,11 +522,11 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 		}
 		perf->state = perf->state_count-1;
 		return freqn;
-	} else {
-		/* assume CPU is at P0... */
-		perf->state = 0;
-		return perf->states[0].core_frequency * 1000;
 	}
+#endif
+	/* assume CPU is at P0... */
+	perf->state = 0;
+	return perf->states[0].core_frequency * 1000;
 }
 static void free_acpi_perf_data(void)
@@ -626,6 +647,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 	 * AL30: A Machine Check Exception (MCE) Occurring during an
 	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
 	 * Both Processor Cores to Lock Up. */
+#ifdef CONFIG_X86
 	if (c->x86_vendor == X86_VENDOR_INTEL) {
 		if ((c->x86 == 15) &&
 		    (c->x86_model == 6) &&
@@ -637,6 +659,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 			return -ENODEV;
 		}
 	}
+#endif
 	return 0;
 }
 #endif
@@ -648,7 +671,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	unsigned int cpu = policy->cpu;
 	struct acpi_cpufreq_data *data;
 	unsigned int result = 0;
+#ifdef CONFIG_X86
 	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
+#else
+	struct cpuinfo_x86 *c = NULL;
+#endif
 	struct acpi_processor_performance *perf;
 #ifdef CONFIG_SMP
 	static int blacklisted;
@@ -676,8 +703,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
 	per_cpu(acfreq_data, cpu) = data;
+#ifdef CONFIG_X86
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
 		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
+#endif
 	result = acpi_processor_register_performance(data->acpi_data, cpu);
 	if (result)
@@ -700,13 +729,17 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		dmi_check_system(sw_any_bug_dmi_table);
 		if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
 			policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+#ifdef CONFIG_X86
 			cpumask_copy(policy->cpus, cpu_core_mask(cpu));
+#endif
 		}
 	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
 		cpumask_clear(policy->cpus);
 		cpumask_set_cpu(cpu, policy->cpus);
+#ifdef CONFIG_X86
 		cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu));
+#endif
 		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
 		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
 	}
@@ -726,12 +759,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	switch (perf->control_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
+#ifdef CONFIG_X86
 		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
 		    boot_cpu_data.x86 == 0xf) {
 			pr_debug("AMD K8 systems must use native drivers.\n");
 			result = -ENODEV;
 			goto err_unreg;
 		}
+#endif
 		pr_debug("SYSTEM IO addr space\n");
 		data->cpu_feature = SYSTEM_IO_CAPABLE;
 		break;
@@ -898,6 +933,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
 static void __init acpi_cpufreq_boost_init(void)
 {
+#ifdef CONFIG_X86
 	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
 		msrs = msrs_alloc();
@@ -917,16 +953,19 @@ static void __init acpi_cpufreq_boost_init(void)
 		cpu_notifier_register_done();
 	}
+#endif
 }
 static void acpi_cpufreq_boost_exit(void)
 {
+#ifdef CONFIG_X86
 	if (msrs) {
 		unregister_cpu_notifier(&boost_nb);
 		msrs_free(msrs);
 		msrs = NULL;
 	}
+#endif
 }
 static int __init acpi_cpufreq_init(void)
@@ -995,12 +1034,14 @@ MODULE_PARM_DESC(acpi_pstate_strict,
 late_initcall(acpi_cpufreq_init);
 module_exit(acpi_cpufreq_exit);
+#ifdef CONFIG_X86
 static const struct x86_cpu_id acpi_cpufreq_ids[] = {
 	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
 	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
+#endif
 static const struct acpi_device_id processor_device_ids[] = {
 	{ACPI_PROCESSOR_OBJECT_HID, },