On 10 March 2015 at 03:12, Alex Shi alex.shi@linaro.org wrote:
Hi Mark,
I've just merged the LTS updates 3.10.69 through 3.10.71 into LSK. Please review the merge conflict resolution on LSK:
git://git.linaro.org/kernel/linux-linaro-stable.git linux-linaro-lsk-test
Both this and your other merge look good (so long as they test out OK!)
Regards!
commit 42f504e28a169ebfea0e3d9c6e151451800e41d5 Merge: c810fcb 389fb5fb Author: Alex Shi alex.shi@linaro.org Date: Tue Mar 10 09:33:40 2015 +0800
Merge tag 'v3.10.71' into linux-linaro-lsk This is the 3.10.71 stable release Conflicts: arch/arm64/kernel/setup.c
diff --cc arch/arm64/include/asm/cputype.h index ec5e41c,be9b5ca..539bea6 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@@ -94,9 -71,11 +94,11 @@@ static inline unsigned int __attribute_
static inline u32 __attribute_const__ read_cpuid_cachetype(void) {
return read_cpuid(ID_CTR_EL0);
return read_cpuid(CTR_EL0);
}
void cpuinfo_store_cpu(void);
#endif /* __ASSEMBLY__ */
#endif
diff --cc arch/arm64/kernel/setup.c index e87b5fd,7cc551d..9767695 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@@ -41,9 -41,8 +41,10 @@@ #include <linux/memblock.h> #include <linux/of_fdt.h> #include <linux/of_platform.h> +#include <linux/efi.h>
- #include <linux/personality.h>
+#include <asm/fixmap.h> #include <asm/cputype.h> #include <asm/elf.h> #include <asm/cputable.h> @@@ -112,95 -98,28 +113,108 @@@ void __init early_print(const char *str printk("%s", buf); }
- struct cpuinfo_arm64 {
struct cpu cpu;
u32 reg_midr;
- };
- static DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
- void cpuinfo_store_cpu(void)
- {
struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
info->reg_midr = read_cpuid_id();
- }
-static void __init setup_processor(void) +void __init smp_setup_processor_id(void) {
struct cpu_info *cpu_info;
/*
* clear __my_cpu_offset on boot CPU to avoid hang caused by
* using percpu variable early, for example, lockdep will
* access percpu variable inside lock_release
*/
set_my_cpu_offset(0);
+}
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id) +{
return phys_id == cpu_logical_map(cpu);
+}
+struct mpidr_hash mpidr_hash; +#ifdef CONFIG_SMP +/**
- smp_build_mpidr_hash - Pre-compute shifts required at each affinity
level in order to build a linear index from an
MPIDR value. Resulting algorithm is a collision
free hash carried out through shifting and ORing
- */
+static void __init smp_build_mpidr_hash(void) +{
u32 i, affinity, fs[4], bits[4], ls;
u64 mask = 0;
/*
* Pre-scan the list of MPIDRS and filter out bits that do
* not contribute to affinity levels, ie they never toggle.
*/
for_each_possible_cpu(i)
mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
pr_debug("mask of set bits %#llx\n", mask);
/*
* Find and stash the last and first bit set at all affinity levels to
* check how many bits are required to represent them.
*/
for (i = 0; i < 4; i++) {
affinity = MPIDR_AFFINITY_LEVEL(mask, i);
/*
* Find the MSB bit and LSB bits position
* to determine how many bits are required
* to express the affinity level.
*/
ls = fls(affinity);
fs[i] = affinity ? ffs(affinity) - 1 : 0;
bits[i] = ls - fs[i];
}
/*
* An index can be created from the MPIDR_EL1 by isolating the
* significant bits at each affinity level and by shifting
* them in order to compress the 32 bits values space to a
* compressed set of values. This is equivalent to hashing
* the MPIDR_EL1 through shifting and ORing. It is a collision free
* hash though not minimal since some levels might contain a number
* of CPUs that is not an exact power of 2 and their bit
* representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
*/
mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
(bits[1] + bits[0]);
mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
fs[3] - (bits[2] + bits[1] + bits[0]);
mpidr_hash.mask = mask;
mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
mpidr_hash.shift_aff[0],
mpidr_hash.shift_aff[1],
mpidr_hash.shift_aff[2],
mpidr_hash.shift_aff[3],
mpidr_hash.mask,
mpidr_hash.bits); /*
* locate processor in the list of supported processor
* types. The linker builds this table for us from the
* entries in arch/arm/mm/proc.S
* 4x is an arbitrary value used to warn on a hash table much bigger
* than expected on most systems. */
if (mpidr_hash_size() > 4 * num_possible_cpus())
pr_warn("Large number of MPIDR hash buckets detected\n");
__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
+} +#endif
+static void __init setup_processor(void) +{
struct cpu_info *cpu_info;
u64 features, block;
cpu_info = lookup_processor_type(read_cpuid_id()); if (!cpu_info) { printk("CPU configuration botched (ID %08x), unable to
continue.\n", @@@ -453,10 -304,8 +469,8 @@@ static int __init arm64_device_init(voi of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); return 0; } -arch_initcall(arm64_device_init); +arch_initcall_sync(arm64_device_init);
- static DEFINE_PER_CPU(struct cpu, cpu_data);
- static int __init topology_init(void) { int i;
@@@ -499,24 -369,36 +540,37 @@@ static int c_show(struct seq_file *m, v #ifdef CONFIG_SMP seq_printf(m, "processor\t: %d\n", i); #endif
}
/* dump out the processor features */
seq_puts(m, "Features\t: ");
for (i = 0; hwcap_str[i]; i++)
if (elf_hwcap & (1 << i))
seq_printf(m, "%s ", hwcap_str[i]);
seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >>
24);
seq_printf(m, "CPU architecture: AArch64\n");
seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) &
15);
seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) &
0xfff);
seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
loops_per_jiffy / (500000UL/HZ),
loops_per_jiffy / (5000UL/HZ) % 100);
seq_puts(m, "\n");
/*
* Dump out the common processor features in a single line.
* Userspace should read the hwcaps with
getauxval(AT_HWCAP)
* rather than attempting to parse this, but there's a
body of
* software which does already (at least for 32-bit).
*/
seq_puts(m, "Features\t:");
if (personality(current->personality) == PER_LINUX32) {
- #ifdef CONFIG_COMPAT
for (j = 0; compat_hwcap_str[j]; j++)
if (COMPAT_ELF_HWCAP & (1 << j))
seq_printf(m, " %s",
compat_hwcap_str[j]);
- #endif /* CONFIG_COMPAT */
} else {
for (j = 0; hwcap_str[j]; j++)
if (elf_hwcap & (1 << j))
seq_printf(m, " %s", hwcap_str[j]);
}
seq_puts(m, "\n");
seq_printf(m, "Hardware\t: %s\n", machine_name);
seq_printf(m, "CPU implementer\t: 0x%02x\n", (midr >> 24));
seq_printf(m, "CPU architecture: 8\n");
seq_printf(m, "CPU variant\t: 0x%x\n", ((midr >> 20) &
0xf));
seq_printf(m, "CPU part\t: 0x%03x\n", ((midr >> 4) &
0xfff));
seq_printf(m, "CPU revision\t: %d\n\n", (midr & 0xf));
} return 0;
}
diff --cc arch/arm64/kernel/smp.c index 0ac31a5,b0a8703..9a3c7ef --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@@ -152,17 -187,24 +152,22 @@@ asmlinkage void __cpuinit secondary_sta preempt_disable(); trace_hardirqs_off();
/*
* Let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
write_pen_release(INVALID_HWID);
if (cpu_ops[cpu]->cpu_postboot)
cpu_ops[cpu]->cpu_postboot(); /*
* Synchronise with the boot thread.
* Enable GIC and timers. */
raw_spin_lock(&boot_lock);
raw_spin_unlock(&boot_lock);
notify_cpu_starting(cpu);
smp_store_cpu_info(cpu); /*
* Log the CPU info before it is marked online and might get read.
*/
cpuinfo_store_cpu();
/* * OK, now it's safe to let the boot CPU continue. Wait for * the CPU migration code to notice that the CPU is online * before we continue.
-- Thanks Alex