The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 52f73c383b2418f2d31b798e765ae7d596c35021 Mon Sep 17 00:00:00 2001
From: Suzuki K Poulose <suzuki.poulose@arm.com>
Date: Mon, 13 Jan 2020 23:30:23 +0000
Subject: [PATCH] arm64: nofpsmid: Handle TIF_FOREIGN_FPSTATE flag cleanly
We detect the absence of FP/SIMD only after an incapable CPU is
brought up, and by then kernel threads are already running with
TIF_FOREIGN_FPSTATE set. The flag could also be set for early userspace
applications (e.g., modprobe triggered from initramfs) and for init.
This could cause those applications to loop forever in
do_notify_resume(), as we never clear the TIF flag once we know that
we don't support FP.
Fix this by making sure that we clear the TIF_FOREIGN_FPSTATE flag for
tasks which may have it set, as we would have done in the normal case,
but avoid touching the hardware state (since we don't support any).
Also, to make sure we handle the cases seamlessly, we categorise the
helper functions into two groups:
1) Helpers for common core code, which take appropriate actions
without knowing the current FP/SIMD state of the CPU/task,
e.g., fpsimd_restore_current_state(), fpsimd_flush_task_state(),
fpsimd_save_and_flush_cpu_state().
We bail out early from these functions, taking any appropriate actions
(e.g., clearing the TIF flag) where necessary to hide the handling
from core code.
2) Helpers used only when the presence of FP/SIMD is known, i.e.,
those that save/restore the FP/SIMD register state or modify the
CPU/task FP/SIMD state, e.g.:
 fpsimd_save(), task_fpsimd_load()  - save/restore task FP/SIMD registers
 fpsimd_bind_task_to_cpu()  \
                             - update the "state" metadata for CPU/task
 fpsimd_bind_state_to_cpu() /
 fpsimd_update_current_state()      - update the FP/SIMD state for the
                                      current task from memory
These must not be called in the absence of FP/SIMD, so add a WARN_ON()
to make sure they are never invoked on such systems.
KVM also uses the TIF_FOREIGN_FPSTATE flag to manage the FP/SIMD state
on the CPU. However, without FP/SIMD support we trap all accesses and
inject an undefined instruction exception, so we should never "load"
guest state. Add a sanity check to make sure this holds.
Fixes: 82e0191a1aa11abf ("arm64: Support systems without FP/ASIMD")
Cc: Will Deacon <will@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 3eb338f14386..94289d126993 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -269,6 +269,7 @@ static void sve_free(struct task_struct *task)
*/
static void task_fpsimd_load(void)
{
+ WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
if (system_supports_sve() && test_thread_flag(TIF_SVE))
@@ -289,6 +290,7 @@ static void fpsimd_save(void)
this_cpu_ptr(&fpsimd_last_state);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
+ WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -1092,6 +1094,7 @@ void fpsimd_bind_task_to_cpu(void)
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
+ WARN_ON(!system_supports_fpsimd());
last->st = &current->thread.uw.fpsimd_state;
last->sve_state = current->thread.sve_state;
last->sve_vl = current->thread.sve_vl;
@@ -1114,6 +1117,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
+ WARN_ON(!system_supports_fpsimd());
WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st;
@@ -1128,8 +1132,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
*/
void fpsimd_restore_current_state(void)
{
- if (!system_supports_fpsimd())
+ /*
+ * For the tasks that were created before we detected the absence of
+ * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
+ * e.g, init. This could be then inherited by the children processes.
+ * If we later detect that the system doesn't support FP/SIMD,
+ * we must clear the flag for all the tasks to indicate that the
+ * FPSTATE is clean (as we can't have one) to avoid looping for ever in
+ * do_notify_resume().
+ */
+ if (!system_supports_fpsimd()) {
+ clear_thread_flag(TIF_FOREIGN_FPSTATE);
return;
+ }
get_cpu_fpsimd_context();
@@ -1148,7 +1163,7 @@ void fpsimd_restore_current_state(void)
*/
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
- if (!system_supports_fpsimd())
+ if (WARN_ON(!system_supports_fpsimd()))
return;
get_cpu_fpsimd_context();
@@ -1179,7 +1194,13 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
void fpsimd_flush_task_state(struct task_struct *t)
{
t->thread.fpsimd_cpu = NR_CPUS;
-
+ /*
+ * If we don't support fpsimd, bail out after we have
+ * reset the fpsimd_cpu for this task and clear the
+ * FPSTATE.
+ */
+ if (!system_supports_fpsimd())
+ return;
barrier();
set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
@@ -1193,6 +1214,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
*/
static void fpsimd_flush_cpu_state(void)
{
+ WARN_ON(!system_supports_fpsimd());
__this_cpu_write(fpsimd_last_state.st, NULL);
set_thread_flag(TIF_FOREIGN_FPSTATE);
}
@@ -1203,6 +1225,8 @@ static void fpsimd_flush_cpu_state(void)
*/
void fpsimd_save_and_flush_cpu_state(void)
{
+ if (!system_supports_fpsimd())
+ return;
WARN_ON(preemptible());
__get_cpu_fpsimd_context();
fpsimd_save();
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 72fbbd86eb5e..e5816d885761 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -28,7 +28,15 @@
/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
+ /*
+ * When the system doesn't support FP/SIMD, we cannot rely on
+ * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
+ * abort on the very first access to FP and thus we should never
+ * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
+ * trap the accesses.
+ */
+ if (!system_supports_fpsimd() ||
+ vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
KVM_ARM64_FP_HOST);
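For anyone preparing the backport, the heart of the fix is the split
between helpers that core code may call on any system and helpers that
assume FP/SIMD exists. Below is a minimal, compilable sketch of the two
patterns; the function names and the boolean stand-ins are illustrative
only, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

static bool fpsimd_present;      /* stands in for system_supports_fpsimd() */
static bool tif_foreign_fpstate; /* stands in for TIF_FOREIGN_FPSTATE */

/* Category 1: callable from generic code on any system. */
static void restore_current_state_sketch(void)
{
	if (!fpsimd_present) {
		/* Clear the stale flag so do_notify_resume() stops looping. */
		tif_foreign_fpstate = false;
		return;
	}
	/* ... load registers and bind the task state to this CPU ... */
}

/* Category 2: only valid when FP/SIMD exists; warn loudly otherwise. */
static void load_task_registers_sketch(void)
{
	if (!fpsimd_present) {
		fprintf(stderr, "WARN: called without FP/SIMD\n");
		return;
	}
	/* ... restore the task's FP/SIMD registers ... */
}

int main(void)
{
	tif_foreign_fpstate = true;     /* as inherited from early boot */
	fpsimd_present = false;         /* an incapable CPU was brought up */

	restore_current_state_sketch(); /* category 1: clears the flag */
	printf("flag after restore: %d\n", tif_foreign_fpstate);

	load_task_registers_sketch();   /* category 2: warns */
	return 0;
}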
The patch below does not apply to the 4.19-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 52f73c383b2418f2d31b798e765ae7d596c35021 Mon Sep 17 00:00:00 2001
From: Suzuki K Poulose <suzuki.poulose@arm.com>
Date: Mon, 13 Jan 2020 23:30:23 +0000
Subject: [PATCH] arm64: nofpsmid: Handle TIF_FOREIGN_FPSTATE flag cleanly
We detect the absence of FP/SIMD only after an incapable CPU is
brought up, and by then kernel threads are already running with
TIF_FOREIGN_FPSTATE set. The flag could also be set for early userspace
applications (e.g., modprobe triggered from initramfs) and for init.
This could cause those applications to loop forever in
do_notify_resume(), as we never clear the TIF flag once we know that
we don't support FP.
Fix this by making sure that we clear the TIF_FOREIGN_FPSTATE flag for
tasks which may have it set, as we would have done in the normal case,
but avoid touching the hardware state (since we don't support any).
Also, to make sure we handle the cases seamlessly, we categorise the
helper functions into two groups:
1) Helpers for common core code, which take appropriate actions
without knowing the current FP/SIMD state of the CPU/task,
e.g., fpsimd_restore_current_state(), fpsimd_flush_task_state(),
fpsimd_save_and_flush_cpu_state().
We bail out early from these functions, taking any appropriate actions
(e.g., clearing the TIF flag) where necessary to hide the handling
from core code.
2) Helpers used only when the presence of FP/SIMD is known, i.e.,
those that save/restore the FP/SIMD register state or modify the
CPU/task FP/SIMD state, e.g.:
 fpsimd_save(), task_fpsimd_load()  - save/restore task FP/SIMD registers
 fpsimd_bind_task_to_cpu()  \
                             - update the "state" metadata for CPU/task
 fpsimd_bind_state_to_cpu() /
 fpsimd_update_current_state()      - update the FP/SIMD state for the
                                      current task from memory
These must not be called in the absence of FP/SIMD, so add a WARN_ON()
to make sure they are never invoked on such systems.
KVM also uses the TIF_FOREIGN_FPSTATE flag to manage the FP/SIMD state
on the CPU. However, without FP/SIMD support we trap all accesses and
inject an undefined instruction exception, so we should never "load"
guest state. Add a sanity check to make sure this holds.
Fixes: 82e0191a1aa11abf ("arm64: Support systems without FP/ASIMD")
Cc: Will Deacon <will@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 3eb338f14386..94289d126993 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -269,6 +269,7 @@ static void sve_free(struct task_struct *task)
*/
static void task_fpsimd_load(void)
{
+ WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
if (system_supports_sve() && test_thread_flag(TIF_SVE))
@@ -289,6 +290,7 @@ static void fpsimd_save(void)
this_cpu_ptr(&fpsimd_last_state);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
+ WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -1092,6 +1094,7 @@ void fpsimd_bind_task_to_cpu(void)
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
+ WARN_ON(!system_supports_fpsimd());
last->st = &current->thread.uw.fpsimd_state;
last->sve_state = current->thread.sve_state;
last->sve_vl = current->thread.sve_vl;
@@ -1114,6 +1117,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
+ WARN_ON(!system_supports_fpsimd());
WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st;
@@ -1128,8 +1132,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
*/
void fpsimd_restore_current_state(void)
{
- if (!system_supports_fpsimd())
+ /*
+ * For the tasks that were created before we detected the absence of
+ * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
+ * e.g, init. This could be then inherited by the children processes.
+ * If we later detect that the system doesn't support FP/SIMD,
+ * we must clear the flag for all the tasks to indicate that the
+ * FPSTATE is clean (as we can't have one) to avoid looping for ever in
+ * do_notify_resume().
+ */
+ if (!system_supports_fpsimd()) {
+ clear_thread_flag(TIF_FOREIGN_FPSTATE);
return;
+ }
get_cpu_fpsimd_context();
@@ -1148,7 +1163,7 @@ void fpsimd_restore_current_state(void)
*/
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
- if (!system_supports_fpsimd())
+ if (WARN_ON(!system_supports_fpsimd()))
return;
get_cpu_fpsimd_context();
@@ -1179,7 +1194,13 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
void fpsimd_flush_task_state(struct task_struct *t)
{
t->thread.fpsimd_cpu = NR_CPUS;
-
+ /*
+ * If we don't support fpsimd, bail out after we have
+ * reset the fpsimd_cpu for this task and clear the
+ * FPSTATE.
+ */
+ if (!system_supports_fpsimd())
+ return;
barrier();
set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
@@ -1193,6 +1214,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
*/
static void fpsimd_flush_cpu_state(void)
{
+ WARN_ON(!system_supports_fpsimd());
__this_cpu_write(fpsimd_last_state.st, NULL);
set_thread_flag(TIF_FOREIGN_FPSTATE);
}
@@ -1203,6 +1225,8 @@ static void fpsimd_flush_cpu_state(void)
*/
void fpsimd_save_and_flush_cpu_state(void)
{
+ if (!system_supports_fpsimd())
+ return;
WARN_ON(preemptible());
__get_cpu_fpsimd_context();
fpsimd_save();
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 72fbbd86eb5e..e5816d885761 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -28,7 +28,15 @@
/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
+ /*
+ * When the system doesn't support FP/SIMD, we cannot rely on
+ * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
+ * abort on the very first access to FP and thus we should never
+ * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
+ * trap the accesses.
+ */
+ if (!system_supports_fpsimd() ||
+ vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
KVM_ARM64_FP_HOST);
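The KVM hunk is identical in both trees, and its decision logic reduces
to a pure function of two inputs. A standalone sketch under stand-in
flag names (not the kernel's kvm_vcpu layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FP_ENABLED (1u << 0) /* stands in for KVM_ARM64_FP_ENABLED */
#define FP_HOST    (1u << 1) /* stands in for KVM_ARM64_FP_HOST */

/*
 * Without FP/SIMD support, or when the host thread's FP state is
 * foreign, drop both flags so guest FP accesses remain trapped.
 */
static bool update_fp_enabled_sketch(bool supports_fpsimd,
				     bool tif_foreign_fpstate,
				     uint32_t *vcpu_flags)
{
	if (!supports_fpsimd || tif_foreign_fpstate)
		*vcpu_flags &= ~(FP_ENABLED | FP_HOST);

	return *vcpu_flags & FP_ENABLED;
}

int main(void)
{
	uint32_t flags = FP_ENABLED | FP_HOST;

	/* No FP/SIMD: flags are cleared even though TIF is not set. */
	printf("fp enabled: %d\n",
	       update_fp_enabled_sketch(false, false, &flags));
	return 0;
}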
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 3837407c1aa1101ed5e214c7d6041e7a23335c6e Mon Sep 17 00:00:00 2001
From: Eric Auger <eric.auger@redhat.com>
Date: Fri, 24 Jan 2020 15:25:32 +0100
Subject: [PATCH] KVM: arm64: pmu: Don't increment SW_INCR if PMCR.E is unset
The specification says PMSWINC increments PMEVCNTR<n>_EL1 by 1
if PMEVCNTR<n>_EL0 is enabled and configured to count SW_INCR.
For PMEVCNTR<n>_EL0 to be enabled, we need the PMCNTENSET bit to be set
for the corresponding event counter, but we also need the PMCR.E bit
to be set.
Fixes: 7a0adc7064b8 ("arm64: KVM: Add access handler for PMSWINC register")
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Andrew Murray <andrew.murray@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200124142535.29386-2-eric.auger@redhat.com
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 8731dfeced8b..c3f8b059881e 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -486,6 +486,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
if (val == 0)
return;
+ if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+ return;
+
enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
if (!(val & BIT(i)))
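The fix is easiest to see as a gating rule: a software increment only
lands when PMCR.E is set globally and the counter's PMCNTENSET bit is
set. A compilable sketch of that rule follows; the state is a stand-in,
and it omits the real code's additional check that the counter is
configured to count the SW_INCR event:

#include <stdint.h>
#include <stdio.h>

#define PMCR_E          (1u << 0) /* global enable, as ARMV8_PMU_PMCR_E */
#define NR_EVT_COUNTERS 31        /* event counters below the cycle counter */

static void sw_incr_sketch(uint32_t pmcr, uint32_t pmcntenset,
			   uint32_t val, uint64_t counters[NR_EVT_COUNTERS])
{
	if (val == 0)
		return;
	if (!(pmcr & PMCR_E)) /* the fix: bail out when globally disabled */
		return;

	for (int i = 0; i < NR_EVT_COUNTERS; i++)
		if ((val & (1u << i)) && (pmcntenset & (1u << i)))
			counters[i]++;
}

int main(void)
{
	uint64_t c[NR_EVT_COUNTERS] = { 0 };

	sw_incr_sketch(0, 1u << 2, 1u << 2, c);      /* PMCR.E clear: no-op */
	sw_incr_sketch(PMCR_E, 1u << 2, 1u << 2, c); /* enabled: counts */
	printf("counter 2 = %llu\n", (unsigned long long)c[2]);
	return 0;
}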
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 3837407c1aa1101ed5e214c7d6041e7a23335c6e Mon Sep 17 00:00:00 2001
From: Eric Auger <eric.auger@redhat.com>
Date: Fri, 24 Jan 2020 15:25:32 +0100
Subject: [PATCH] KVM: arm64: pmu: Don't increment SW_INCR if PMCR.E is unset
The specification says PMSWINC increments PMEVCNTR<n>_EL1 by 1
if PMEVCNTR<n>_EL0 is enabled and configured to count SW_INCR.
For PMEVCNTR<n>_EL0 to be enabled, we need the PMCNTENSET bit to be set
for the corresponding event counter, but we also need the PMCR.E bit
to be set.
Fixes: 7a0adc7064b8 ("arm64: KVM: Add access handler for PMSWINC register")
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Andrew Murray <andrew.murray@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200124142535.29386-2-eric.auger@redhat.com
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 8731dfeced8b..c3f8b059881e 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -486,6 +486,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
if (val == 0)
return;
+ if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+ return;
+
enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
if (!(val & BIT(i)))
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 7559950aef1ab8792c50797c6c5c7c5150a02460 Mon Sep 17 00:00:00 2001
From: Suzuki K Poulose <suzuki.poulose@arm.com>
Date: Mon, 13 Jan 2020 23:30:20 +0000
Subject: [PATCH] arm64: cpufeature: Set the FP/SIMD compat HWCAP bits properly
We set the compat_elf_hwcap bits unconditionally on arm64 to include
VFP and NEON support. However, the FP/SIMD unit is optional on Arm v8
and thus could be missing. We already handle this properly in the
kernel, but we still advertise to COMPAT applications that VFP is
available. Fix this to make sure we only advertise the capabilities
when we really have them.
Fixes: 82e0191a1aa11abf ("arm64: Support systems without FP/ASIMD")
Cc: Will Deacon <will@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 2fc9f18e2d2d..c008164b0848 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -32,9 +32,7 @@ static unsigned long elf_hwcap __read_mostly;
#define COMPAT_ELF_HWCAP_DEFAULT \
(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
@@ -1597,6 +1595,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.match_list = list, \
}
+#define HWCAP_CAP_MATCH(match, cap_type, cap) \
+ { \
+ __HWCAP_CAP(#cap, cap_type, cap) \
+ .matches = match, \
+ }
+
#ifdef CONFIG_ARM64_PTR_AUTH
static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
{
@@ -1670,8 +1674,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
{},
};
+#ifdef CONFIG_COMPAT
+static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
+{
+ /*
+ * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
+ * in line with that of arm32 as in vfp_init(). We make sure that the
+ * check is future proof, by making sure value is non-zero.
+ */
+ u32 mvfr1;
+
+ WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
+ if (scope == SCOPE_SYSTEM)
+ mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
+ else
+ mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
+
+ return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
+ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
+ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
+}
+#endif
+
static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
+ HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
+ HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
+ /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
+ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
+ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
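The new compat checks boil down to reading 4-bit fields out of an ID
register. A small sketch of the MVFR1-based NEON test with a made-up
register value; the field shifts follow the Arm ARM layout for MVFR1,
and nothing here reads real hardware:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MVFR1_SIMDLS_SHIFT   8
#define MVFR1_SIMDINT_SHIFT 12
#define MVFR1_SIMDSP_SHIFT  16

static unsigned int extract_field(uint32_t reg, int shift)
{
	return (reg >> shift) & 0xf; /* ID register fields are 4 bits wide */
}

/* All three SIMD fields must be non-zero, mirroring arm32's vfp_init(). */
static bool compat_has_neon_sketch(uint32_t mvfr1)
{
	return extract_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
	       extract_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
	       extract_field(mvfr1, MVFR1_SIMDLS_SHIFT);
}

int main(void)
{
	uint32_t sample = (1u << MVFR1_SIMDSP_SHIFT) |
			  (1u << MVFR1_SIMDINT_SHIFT) |
			  (1u << MVFR1_SIMDLS_SHIFT);

	printf("NEON advertised: %d\n", compat_has_neon_sketch(sample));
	printf("NEON advertised: %d\n", compat_has_neon_sketch(0));
	return 0;
}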
The patch below does not apply to the 4.19-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 7559950aef1ab8792c50797c6c5c7c5150a02460 Mon Sep 17 00:00:00 2001
From: Suzuki K Poulose <suzuki.poulose@arm.com>
Date: Mon, 13 Jan 2020 23:30:20 +0000
Subject: [PATCH] arm64: cpufeature: Set the FP/SIMD compat HWCAP bits properly
We set the compat_elf_hwcap bits unconditionally on arm64 to include
VFP and NEON support. However, the FP/SIMD unit is optional on Arm v8
and thus could be missing. We already handle this properly in the
kernel, but we still advertise to COMPAT applications that VFP is
available. Fix this to make sure we only advertise the capabilities
when we really have them.
Fixes: 82e0191a1aa11abf ("arm64: Support systems without FP/ASIMD")
Cc: Will Deacon <will@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 2fc9f18e2d2d..c008164b0848 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -32,9 +32,7 @@ static unsigned long elf_hwcap __read_mostly;
#define COMPAT_ELF_HWCAP_DEFAULT \
(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
@@ -1597,6 +1595,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.match_list = list, \
}
+#define HWCAP_CAP_MATCH(match, cap_type, cap) \
+ { \
+ __HWCAP_CAP(#cap, cap_type, cap) \
+ .matches = match, \
+ }
+
#ifdef CONFIG_ARM64_PTR_AUTH
static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
{
@@ -1670,8 +1674,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
{},
};
+#ifdef CONFIG_COMPAT
+static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
+{
+ /*
+ * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
+ * in line with that of arm32 as in vfp_init(). We make sure that the
+ * check is future proof, by making sure value is non-zero.
+ */
+ u32 mvfr1;
+
+ WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
+ if (scope == SCOPE_SYSTEM)
+ mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
+ else
+ mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
+
+ return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
+ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
+ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
+}
+#endif
+
static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
+ HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
+ HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
+ /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
+ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
+ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
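After this change a 32-bit compat binary can no longer take VFP/NEON
for granted. The advertised bits can be checked from userspace with the
standard auxiliary vector API; the fallback constants below are the
arm32 uapi values, provided only because this sketch does not include
the arm32 hwcap header:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_NEON
#define HWCAP_NEON  (1 << 12) /* arm32 uapi value */
#endif
#ifndef HWCAP_VFPv3
#define HWCAP_VFPv3 (1 << 13) /* arm32 uapi value */
#endif

int main(void)
{
	/* Built as an arm32 (compat) binary, this reflects compat_elf_hwcap. */
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("NEON:  %s\n", (hwcap & HWCAP_NEON)  ? "yes" : "no");
	printf("VFPv3: %s\n", (hwcap & HWCAP_VFPv3) ? "yes" : "no");
	return 0;
}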