This is a note to let you know that I've just added the patch titled
[Variant 1/Spectre-v1] arm64: Use pointer masking to limit uaccess speculation
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
arm64-use-pointer-masking-to-limit-uaccess-speculation.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From foo@baz Wed Feb 14 14:44:54 CET 2018
From: Robin Murphy <robin.murphy@arm.com>
Date: Mon, 5 Feb 2018 15:34:19 +0000
Subject: [Variant 1/Spectre-v1] arm64: Use pointer masking to limit uaccess speculation
From: Robin Murphy <robin.murphy@arm.com>
Commit 4d8efc2d5ee4 upstream.
Similarly to x86, mitigate speculation past an access_ok() check by
masking the pointer against the address limit before use.
Even if we don't expect speculative writes per se, it is plausible that
a CPU may still speculate at least as far as fetching a cache line for
writing, hence we also harden put_user() and clear_user() for peace of
mind.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/arm64/include/asm/uaccess.h | 26 +++++++++++++++++++++++---
1 file changed, 23 insertions(+), 3 deletions(-)
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -216,6 +216,26 @@ static inline void uaccess_enable_not_ua
}
/*
+ * Sanitise a uaccess pointer such that it becomes NULL if above the
+ * current addr_limit.
+ */
+#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
+static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
+{
+ void __user *safe_ptr;
+
+ asm volatile(
+ " bics xzr, %1, %2\n"
+ " csel %0, %1, xzr, eq\n"
+ : "=&r" (safe_ptr)
+ : "r" (ptr), "r" (current_thread_info()->addr_limit)
+ : "cc");
+
+ csdb();
+ return safe_ptr;
+}
+
+/*
* The "__xxx" versions of the user access functions do not verify the address
* space - it must have been done previously with a separate "access_ok()"
* call.
@@ -285,7 +305,7 @@ do { \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
- __get_user((x), __p) : \
+ __p = uaccess_mask_ptr(__p), __get_user((x), __p) : \
((x) = 0, -EFAULT); \
})
@@ -349,7 +369,7 @@ do { \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
- __put_user((x), __p) : \
+ __p = uaccess_mask_ptr(__p), __put_user((x), __p) : \
-EFAULT; \
})
@@ -365,7 +385,7 @@ extern unsigned long __must_check __clea
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
- n = __clear_user(to, n);
+ n = __clear_user(__uaccess_mask_ptr(to), n);
return n;
}
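For reference, the masking done by __uaccess_mask_ptr() above can be modelled in plain C. The sketch below is only an illustration, assuming (as on arm64 with the inclusive USER_DS limit introduced elsewhere in this queue) that the address limit has the form 2^n - 1, so a pointer is in range exactly when it has no bits set above the limit; the kernel performs the equivalent select with bics/csel followed by csdb() so the chosen value is not subject to value speculation, which plain C cannot guarantee.

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-alone model, not kernel code.  Assumes limit == 2^n - 1 (an
 * inclusive upper bound), so (ptr & ~limit) == 0 iff ptr <= limit.
 * The kernel does this select in inline asm (bics + csel) and then
 * issues csdb() so the result cannot be forged by speculation.
 */
static uintptr_t mask_user_ptr(uintptr_t ptr, uintptr_t limit)
{
	return (ptr & ~limit) ? 0 : ptr;	/* NULL out pointers above the limit */
}

int main(void)
{
	uintptr_t limit = UINTPTR_MAX >> 16;	/* stand-in for addr_limit */

	printf("%#lx\n", (unsigned long)mask_user_ptr(0x1234, limit));		/* kept */
	printf("%#lx\n", (unsigned long)mask_user_ptr(UINTPTR_MAX, limit));	/* 0 */
	return 0;
}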
Patches currently in stable-queue which might be from robin.murphy@arm.com are
queue-4.14/arm64-make-user_ds-an-inclusive-limit.patch
queue-4.14/arm64-uaccess-don-t-bother-eliding-access_ok-checks-in-__-get-put-_user.patch
queue-4.14/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
queue-4.14/firmware-psci-expose-psci-conduit.patch
queue-4.14/firmware-psci-expose-smccc-version-through-psci_ops.patch
queue-4.14/arm64-use-pointer-masking-to-limit-uaccess-speculation.patch
queue-4.14/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
queue-4.14/arm64-implement-array_index_mask_nospec.patch
This is a note to let you know that I've just added the patch titled
[Variant 3/Meltdown] arm64: Turn on KPTI only on CPUs that need it
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
arm64-turn-on-kpti-only-on-cpus-that-need-it.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From foo@baz Wed Feb 14 14:44:54 CET 2018
From: Jayachandran C <jnair@caviumnetworks.com>
Date: Fri, 19 Jan 2018 04:22:48 -0800
Subject: [Variant 3/Meltdown] arm64: Turn on KPTI only on CPUs that need it
From: Jayachandran C <jnair@caviumnetworks.com>
Commit 0ba2e29c7fc1 upstream.
Whitelist Broadcom Vulcan/Cavium ThunderX2 processors in
unmap_kernel_at_el0(). These CPUs are not vulnerable to
CVE-2017-5754 and do not need KPTI when KASLR is off.
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Jayachandran C <jnair@caviumnetworks.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/arm64/kernel/cpufeature.c | 7 +++++++
1 file changed, 7 insertions(+)
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -816,6 +816,13 @@ static bool unmap_kernel_at_el0(const st
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return true;
+ /* Don't force KPTI for CPUs that are not vulnerable */
+ switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
+ case MIDR_CAVIUM_THUNDERX2:
+ case MIDR_BRCM_VULCAN:
+ return false;
+ }
+
/* Defer to CPU feature registers */
return !cpuid_feature_extract_unsigned_field(pfr0,
ID_AA64PFR0_CSV3_SHIFT);
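The whitelist test added above is a plain model-number comparison, and the same pattern is easy to sketch outside the kernel. In the sketch below the mask value and helper are illustrative stand-ins only, not the kernel's MIDR_CPU_MODEL_MASK and MIDR_* definitions, which live in arch/arm64/include/asm/cputype.h.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for MIDR_CPU_MODEL_MASK: implementer, architecture
 * and part-number fields of MIDR_EL1, with variant/revision masked off. */
#define EXAMPLE_MODEL_MASK	0xff0ffff0u

static bool cpu_model_in_whitelist(uint32_t midr, const uint32_t *whitelist,
				   unsigned int count)
{
	uint32_t model = midr & EXAMPLE_MODEL_MASK;

	for (unsigned int i = 0; i < count; i++)
		if (model == whitelist[i])
			return true;	/* known not to need KPTI */

	return false;			/* fall back to the CSV3 field check */
}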
Patches currently in stable-queue which might be from jnair@caviumnetworks.com are
queue-4.14/arm64-turn-on-kpti-only-on-cpus-that-need-it.patch
queue-4.14/arm64-branch-predictor-hardening-for-cavium-thunderx2.patch
queue-4.14/arm64-cputype-add-midr-values-for-cavium-thunderx2-cpus.patch
This is a note to let you know that I've just added the patch titled
[Variant 3/Meltdown] arm64: tls: Avoid unconditional zeroing of tpidrro_el0 for native tasks
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
arm64-tls-avoid-unconditional-zeroing-of-tpidrro_el0-for-native-tasks.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From foo@baz Wed Feb 14 14:44:54 CET 2018
From: Will Deacon <will.deacon@arm.com>
Date: Tue, 14 Nov 2017 14:33:28 +0000
Subject: [Variant 3/Meltdown] arm64: tls: Avoid unconditional zeroing of tpidrro_el0 for native tasks
From: Will Deacon <will.deacon@arm.com>
Commit 18011eac28c7 upstream.
When unmapping the kernel at EL0, we use tpidrro_el0 as a scratch register
during exception entry from native tasks and subsequently zero it in
the kernel_ventry macro. We can therefore avoid zeroing tpidrro_el0
in the context-switch path for native tasks using the entry trampoline.
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/arm64/kernel/process.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -314,16 +314,14 @@ void tls_preserve_current_state(void)
static void tls_thread_switch(struct task_struct *next)
{
- unsigned long tpidr, tpidrro;
-
tls_preserve_current_state();
- tpidr = *task_user_tls(next);
- tpidrro = is_compat_thread(task_thread_info(next)) ?
- next->thread.tp_value : 0;
+ if (is_compat_thread(task_thread_info(next)))
+ write_sysreg(next->thread.tp_value, tpidrro_el0);
+ else if (!arm64_kernel_unmapped_at_el0())
+ write_sysreg(0, tpidrro_el0);
- write_sysreg(tpidr, tpidr_el0);
- write_sysreg(tpidrro, tpidrro_el0);
+ write_sysreg(*task_user_tls(next), tpidr_el0);
}
/* Restore the UAO state depending on next's addr_limit */
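Restating the new tls_thread_switch() logic in isolation: a compat task always gets its TLS value written to tpidrro_el0, while a native task only needs the register zeroed when the kernel is still mapped at EL0, because with the entry trampoline in use kernel_ventry will zero it on the next exception entry anyway. A minimal sketch of that decision follows, with stub flags standing in for is_compat_thread() and arm64_kernel_unmapped_at_el0():

#include <stdbool.h>
#include <stdint.h>

/* Stub flags for illustration; the real predicates are kernel helpers. */
static bool next_is_compat;
static bool kernel_unmapped_at_el0;

/*
 * Returns true if tpidrro_el0 must be written on this context switch and,
 * if so, stores the value to write in *val.
 */
static bool need_tpidrro_el0_update(uint64_t compat_tp, uint64_t *val)
{
	if (next_is_compat) {
		*val = compat_tp;	/* compat task: expose its TLS pointer */
		return true;
	}
	if (!kernel_unmapped_at_el0) {
		*val = 0;		/* native task, no trampoline: must zero */
		return true;
	}
	return false;	/* native task with KPTI: entry code zeroes it anyway */
}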
Patches currently in stable-queue which might be from will.deacon@arm.com are
queue-4.14/arm64-make-user_ds-an-inclusive-limit.patch
queue-4.14/arm64-mm-remove-pre_ttbr0_update_workaround-for-falkor-erratum-e1003.patch
queue-4.14/arm64-uaccess-don-t-bother-eliding-access_ok-checks-in-__-get-put-_user.patch
queue-4.14/arm64-cpufeature-pass-capability-structure-to-enable-callback.patch
queue-4.14/arm64-uaccess-mask-__user-pointers-for-__arch_-clear-copy_-_user.patch
queue-4.14/arm64-mm-add-arm64_kernel_unmapped_at_el0-helper.patch
queue-4.14/arm64-entry-reword-comment-about-post_ttbr_update_workaround.patch
queue-4.14/arm64-kaslr-put-kernel-vectors-address-in-separate-data-page.patch
queue-4.14/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
queue-4.14/arm64-move-bp-hardening-to-check_and_switch_context.patch
queue-4.14/arm-arm64-kvm-advertise-smccc-v1.1.patch
queue-4.14/arm64-move-post_ttbr_update_workaround-to-c-code.patch
queue-4.14/arm64-turn-on-kpti-only-on-cpus-that-need-it.patch
queue-4.14/firmware-psci-expose-psci-conduit.patch
queue-4.14/arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch
queue-4.14/arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch
queue-4.14/arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch
queue-4.14/firmware-psci-expose-smccc-version-through-psci_ops.patch
queue-4.14/arm64-mm-permit-transitioning-from-global-to-non-global-without-bbm.patch
queue-4.14/arm64-mm-allocate-asids-in-pairs.patch
queue-4.14/arm64-tls-avoid-unconditional-zeroing-of-tpidrro_el0-for-native-tasks.patch
queue-4.14/arm64-use-ret-instruction-for-exiting-the-trampoline.patch
queue-4.14/arm64-futex-mask-__user-pointers-prior-to-dereference.patch
queue-4.14/arm64-entry-explicitly-pass-exception-level-to-kernel_ventry-macro.patch
queue-4.14/arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch
queue-4.14/arm64-kpti-make-use-of-ng-dependent-on-arm64_kernel_unmapped_at_el0.patch
queue-4.14/arm-arm64-kvm-add-psci_version-helper.patch
queue-4.14/arm64-kill-psci_get_version-as-a-variant-2-workaround.patch
queue-4.14/arm64-entry-ensure-branch-through-syscall-table-is-bounded-under-speculation.patch
queue-4.14/arm64-mm-use-non-global-mappings-for-kernel-space.patch
queue-4.14/arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch
queue-4.14/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
queue-4.14/arm64-entry-hook-up-entry-trampoline-to-exception-vectors.patch
queue-4.14/arm64-branch-predictor-hardening-for-cavium-thunderx2.patch
queue-4.14/arm64-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch
queue-4.14/arm64-use-pointer-masking-to-limit-uaccess-speculation.patch
queue-4.14/arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.14/arm64-erratum-work-around-falkor-erratum-e1003-in-trampoline-code.patch
queue-4.14/arm64-mm-fix-and-re-enable-arm64_sw_ttbr0_pan.patch
queue-4.14/arm64-mm-invalidate-both-kernel-and-user-asids-when-performing-tlbi.patch
queue-4.14/drivers-firmware-expose-psci_get_version-through-psci_ops-structure.patch
queue-4.14/arm64-mm-rename-post_ttbr0_update_workaround.patch
queue-4.14/arm64-mm-map-entry-trampoline-into-trampoline-and-kernel-page-tables.patch
queue-4.14/arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch
queue-4.14/arm-arm64-kvm-implement-psci-1.0-support.patch
queue-4.14/arm64-move-task_-definitions-to-asm-processor.h.patch
queue-4.14/arm64-kconfig-reword-unmap_kernel_at_el0-kconfig-entry.patch
queue-4.14/arm64-mm-move-asid-from-ttbr0-to-ttbr1.patch
queue-4.14/arm64-mm-introduce-ttbr_asid_mask-for-getting-at-the-asid-in-the-ttbr.patch
queue-4.14/arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch
queue-4.14/arm64-take-into-account-id_aa64pfr0_el1.csv3.patch
queue-4.14/arm64-cputype-add-missing-midr-values-for-cortex-a72-and-cortex-a75.patch
queue-4.14/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.14/arm64-barrier-add-csdb-macros-to-control-data-value-prediction.patch
queue-4.14/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
queue-4.14/arm64-implement-branch-predictor-hardening-for-falkor.patch
queue-4.14/arm64-kconfig-add-config_unmap_kernel_at_el0.patch
queue-4.14/arm64-add-software-workaround-for-falkor-erratum-1041.patch
queue-4.14/arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch
queue-4.14/arm64-run-enable-method-for-errata-work-arounds-on-late-cpus.patch
queue-4.14/arm64-mm-temporarily-disable-arm64_sw_ttbr0_pan.patch
queue-4.14/arm64-entry-add-exception-trampoline-page-for-exceptions-from-el0.patch
queue-4.14/arm64-kvm-make-psci_version-a-fast-path.patch
queue-4.14/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch
queue-4.14/arm64-implement-array_index_mask_nospec.patch
queue-4.14/arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch
queue-4.14/arm-arm64-kvm-consolidate-the-psci-include-files.patch
queue-4.14/arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch
queue-4.14/arm64-define-cputype-macros-for-falkor-cpu.patch
queue-4.14/arm64-cpu_errata-add-kryo-to-falkor-1003-errata.patch
queue-4.14/arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch
queue-4.14/arm64-entry-add-fake-cpu-feature-for-unmapping-the-kernel-at-el0.patch
queue-4.14/arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch
queue-4.14/arm64-cputype-add-midr-values-for-cavium-thunderx2-cpus.patch
queue-4.14/arm64-kvm-increment-pc-after-handling-an-smc-trap.patch
This is a note to let you know that I've just added the patch titled
[Variant 2/Spectre-v2] arm64: Run enable method for errata work arounds on late CPUs
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
arm64-run-enable-method-for-errata-work-arounds-on-late-cpus.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From foo@baz Wed Feb 14 14:44:54 CET 2018
From: Suzuki K Poulose <suzuki.poulose@arm.com>
Date: Wed, 17 Jan 2018 17:42:20 +0000
Subject: [Variant 2/Spectre-v2] arm64: Run enable method for errata work arounds on late CPUs
From: Suzuki K Poulose <suzuki.poulose@arm.com>
Commit 55b35d070c25 upstream.
When a CPU is brought up after we have finalised the system-wide
capabilities (i.e. features and errata), we make sure the new CPU
doesn't need an errata work around that has not already been
detected. However, we don't run the enable() method on the new CPU
for the errata work arounds that were already detected, which could
leave the new CPU running without the necessary work arounds.
It is up to the enable() method to decide whether this CPU should
do something about the erratum.
Fixes: commit 6a6efbb45b7d95c84 ("arm64: Verify CPU errata work arounds on hotplugged CPU")
Cc: Will Deacon <will.deacon@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Andre Przywara <andre.przywara@arm.com>
Cc: Dave Martin <dave.martin@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/arm64/kernel/cpu_errata.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -221,15 +221,18 @@ void verify_local_cpu_errata_workarounds
{
const struct arm64_cpu_capabilities *caps = arm64_errata;
- for (; caps->matches; caps++)
- if (!cpus_have_cap(caps->capability) &&
- caps->matches(caps, SCOPE_LOCAL_CPU)) {
+ for (; caps->matches; caps++) {
+ if (cpus_have_cap(caps->capability)) {
+ if (caps->enable)
+ caps->enable((void *)caps);
+ } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
pr_crit("CPU%d: Requires work around for %s, not detected"
" at boot time\n",
smp_processor_id(),
caps->desc ? : "an erratum");
cpu_die_early();
}
+ }
}
void update_cpu_errata_workarounds(void)
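Because the commit message leaves it to the enable() callback to decide whether a late CPU is actually affected, callbacks written for this scheme re-check the erratum condition locally before acting. The sketch below only illustrates that idiom; the helper names are hypothetical, not kernel APIs.

#include <stdbool.h>

/* Hypothetical stand-ins for the per-CPU check and the actual workaround. */
static bool erratum_affects_this_cpu(void) { return true; }
static void apply_erratum_workaround(void) { /* e.g. toggle a chicken bit */ }

/*
 * An enable() callback that is safe to run on late CPUs: it acts only if
 * this particular CPU is affected, so calling it again is harmless.
 */
static int erratum_xyz_enable(void *data)
{
	(void)data;

	if (!erratum_affects_this_cpu())
		return 0;

	apply_erratum_workaround();
	return 0;
}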
Patches currently in stable-queue which might be from suzuki.poulose@arm.com are
queue-4.14/arm64-cpufeature-pass-capability-structure-to-enable-callback.patch
queue-4.14/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
queue-4.14/arm64-take-into-account-id_aa64pfr0_el1.csv3.patch
queue-4.14/arm64-run-enable-method-for-errata-work-arounds-on-late-cpus.patch
queue-4.14/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch
This is a note to let you know that I've just added the patch titled
arm64: move TASK_* definitions to <asm/processor.h>
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
arm64-move-task_-definitions-to-asm-processor.h.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From foo@baz Wed Feb 14 14:44:54 CET 2018
From: Yury Norov <ynorov@caviumnetworks.com>
Date: Thu, 31 Aug 2017 11:30:50 +0300
Subject: arm64: move TASK_* definitions to <asm/processor.h>
From: Yury Norov <ynorov@caviumnetworks.com>
Commit eef94a3d09aa upstream.
The ILP32 series [1] introduces a dependency on <asm/is_compat.h> for
the TASK_SIZE macro. That header in turn requires <asm/thread_info.h>,
and <asm/thread_info.h> includes <asm/memory.h>, giving a circular
dependency because TASK_SIZE is currently located in <asm/memory.h>.
On other architectures, TASK_SIZE is defined in <asm/processor.h>, and
moving TASK_SIZE there fixes the problem.
Discussion: https://patchwork.kernel.org/patch/9929107/
[1] https://github.com/norov/linux/tree/ilp32-next
CC: Will Deacon <will.deacon@arm.com>
CC: Laura Abbott <labbott@redhat.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Yury Norov <ynorov@caviumnetworks.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/arm64/include/asm/memory.h | 15 ---------------
arch/arm64/include/asm/processor.h | 21 +++++++++++++++++++++
arch/arm64/kernel/entry.S | 2 +-
3 files changed, 22 insertions(+), 16 deletions(-)
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -61,8 +61,6 @@
* KIMAGE_VADDR - the virtual address of the start of the kernel image
* VA_BITS - the maximum number of bits for virtual addresses.
* VA_START - the first kernel virtual address.
- * TASK_SIZE - the maximum size of a user space task.
- * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
#define VA_START (UL(0xffffffffffffffff) - \
@@ -77,19 +75,6 @@
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
-#define TASK_SIZE_64 (UL(1) << VA_BITS)
-
-#ifdef CONFIG_COMPAT
-#define TASK_SIZE_32 UL(0x100000000)
-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
- TASK_SIZE_32 : TASK_SIZE_64)
-#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
- TASK_SIZE_32 : TASK_SIZE_64)
-#else
-#define TASK_SIZE TASK_SIZE_64
-#endif /* CONFIG_COMPAT */
-
-#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
#define KERNEL_START _text
#define KERNEL_END _end
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -19,6 +19,10 @@
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H
+#define TASK_SIZE_64 (UL(1) << VA_BITS)
+
+#ifndef __ASSEMBLY__
+
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
@@ -37,6 +41,22 @@
#include <asm/ptrace.h>
#include <asm/types.h>
+/*
+ * TASK_SIZE - the maximum size of a user space task.
+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
+ */
+#ifdef CONFIG_COMPAT
+#define TASK_SIZE_32 UL(0x100000000)
+#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+#else
+#define TASK_SIZE TASK_SIZE_64
+#endif /* CONFIG_COMPAT */
+
+#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
+
#define STACK_TOP_MAX TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE 0xffff0000
@@ -194,4 +214,5 @@ static inline void spin_lock_prefetch(co
int cpu_enable_pan(void *__unused);
int cpu_enable_cache_maint_trap(void *__unused);
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -28,7 +28,7 @@
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
-#include <asm/memory.h>
+#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
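One detail worth noting in the hunks above: since entry.S now includes <asm/processor.h> rather than <asm/memory.h>, that header has to remain assembler-safe, which is why TASK_SIZE_64 is placed before the new #ifndef __ASSEMBLY__ guard and only the C-only material sits inside it. A minimal sketch of that header layout follows; the file and macro names are illustrative, not the real arm64 header (__ASSEMBLY__ is the kernel's convention, defined by its build system when assembling .S files).

/* example.h - usable from both C and assembly (illustrative names only) */
#ifndef EXAMPLE_H
#define EXAMPLE_H

/*
 * Plain literals are assembler-safe, so they stay outside the guard.
 * (The real header spells this UL(1) << VA_BITS; the UL() wrapper adds
 * the C suffix only when not assembling.)
 */
#define EXAMPLE_TASK_SIZE	0x8000000000	/* 2^39 */

#ifndef __ASSEMBLY__		/* everything below is C-only */

static inline unsigned long long example_task_size(void)
{
	return EXAMPLE_TASK_SIZE;
}

#endif /* __ASSEMBLY__ */
#endif /* EXAMPLE_H */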
Patches currently in stable-queue which might be from ynorov@caviumnetworks.com are
queue-4.14/arm64-move-task_-definitions-to-asm-processor.h.patch
This is a note to let you know that I've just added the patch titled
[Variant 2/Spectre-v2] arm64: Move post_ttbr_update_workaround to C code
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
arm64-move-post_ttbr_update_workaround-to-c-code.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From foo@baz Wed Feb 14 14:44:54 CET 2018
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Tue, 2 Jan 2018 18:19:39 +0000
Subject: [Variant 2/Spectre-v2] arm64: Move post_ttbr_update_workaround to C code
From: Marc Zyngier <marc.zyngier@arm.com>
Commit 95e3de3590e3 upstream.
We will soon need to invoke a CPU-specific function pointer after changing
page tables, so move post_ttbr_update_workaround out into C code to make
this possible.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/arm64/include/asm/assembler.h | 23 -----------------------
arch/arm64/kernel/entry.S | 2 +-
arch/arm64/mm/context.c | 9 +++++++++
arch/arm64/mm/proc.S | 3 +--
4 files changed, 11 insertions(+), 26 deletions(-)
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -481,29 +481,6 @@ alternative_endif
mrs \rd, sp_el0
.endm
-/*
- * Errata workaround post TTBRx_EL1 update.
- */
- .macro post_ttbr_update_workaround
-#ifdef CONFIG_CAVIUM_ERRATUM_27456
-alternative_if ARM64_WORKAROUND_CAVIUM_27456
- ic iallu
- dsb nsh
- isb
-alternative_else_nop_endif
-#endif
- .endm
-
-/**
- * Errata workaround prior to disable MMU. Insert an ISB immediately prior
- * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
- */
- .macro pre_disable_mmu_workaround
-#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
- isb
-#endif
- .endm
-
.macro pte_to_phys, phys, pte
and \phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
.endm
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -275,7 +275,7 @@ alternative_else_nop_endif
* Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
* corruption).
*/
- post_ttbr_update_workaround
+ bl post_ttbr_update_workaround
.endif
1:
.if \el != 0
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -235,6 +235,15 @@ switch_mm_fastpath:
cpu_switch_mm(mm->pgd, mm);
}
+/* Errata workaround post TTBRx_EL1 update. */
+asmlinkage void post_ttbr_update_workaround(void)
+{
+ asm(ALTERNATIVE("nop; nop; nop",
+ "ic iallu; dsb nsh; isb",
+ ARM64_WORKAROUND_CAVIUM_27456,
+ CONFIG_CAVIUM_ERRATUM_27456));
+}
+
static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -148,8 +148,7 @@ ENTRY(cpu_do_switch_mm)
isb
msr ttbr0_el1, x0 // now update TTBR0
isb
- post_ttbr_update_workaround
- ret
+ b post_ttbr_update_workaround // Back to C code...
ENDPROC(cpu_do_switch_mm)
.pushsection ".idmap.text", "awx"
Patches currently in stable-queue which might be from marc.zyngier@arm.com are
queue-4.14/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
queue-4.14/arm64-move-bp-hardening-to-check_and_switch_context.patch
queue-4.14/arm-arm64-kvm-advertise-smccc-v1.1.patch
queue-4.14/arm64-move-post_ttbr_update_workaround-to-c-code.patch
queue-4.14/firmware-psci-expose-psci-conduit.patch
queue-4.14/arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch
queue-4.14/arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch
queue-4.14/arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch
queue-4.14/firmware-psci-expose-smccc-version-through-psci_ops.patch
queue-4.14/arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch
queue-4.14/arm-arm64-kvm-add-psci_version-helper.patch
queue-4.14/arm64-kill-psci_get_version-as-a-variant-2-workaround.patch
queue-4.14/arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch
queue-4.14/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
queue-4.14/arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.14/arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch
queue-4.14/arm-arm64-kvm-implement-psci-1.0-support.patch
queue-4.14/arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch
queue-4.14/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.14/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
queue-4.14/arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch
queue-4.14/arm64-kvm-make-psci_version-a-fast-path.patch
queue-4.14/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch
queue-4.14/arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch
queue-4.14/arm-arm64-kvm-consolidate-the-psci-include-files.patch
queue-4.14/arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch
queue-4.14/arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch
queue-4.14/arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch
queue-4.14/arm64-kvm-increment-pc-after-handling-an-smc-trap.patch