[ Upstream commit 62021cc36add7b2c015b837f7893f2fb4b8c2586 ]
Now that we are explicitly telling the host FP code which register state it needs to save, we can remove the manipulation of TIF_SVE from the KVM code, simplifying it and allowing us to optimise our handling of normal tasks. Remove the manipulation of TIF_SVE from KVM and instead rely on to_save to ensure we save the correct data for it.
There should be no functional or performance impact from this change.
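To illustrate the resulting save-type selection, here is a minimal standalone sketch (not kernel code): the enumerator names are taken from the diff below, while the enum and function wrappers are simplified stand-ins for the kernel's types. It models the new decision in fpsimd_save(): SVE state is saved either because the owner of the last-bound context asked for it via to_save (as KVM now does), or because we are saving current's state and current has TIF_SVE set.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's FP state selector; enumerators as in the diff. */
enum fp_state { FP_STATE_CURRENT, FP_STATE_FPSIMD, FP_STATE_SVE };

/* Model of the save_sve_regs decision now made in fpsimd_save(). */
static bool should_save_sve(enum fp_state to_save, bool tif_sve)
{
	return (to_save == FP_STATE_CURRENT && tif_sve) ||
	       to_save == FP_STATE_SVE;
}

int main(void)
{
	/* A guest with SVE no longer relies on the host thread's TIF_SVE. */
	printf("guest SVE state, host TIF_SVE clear: %d\n",
	       should_save_sve(FP_STATE_SVE, false));
	/* A normal host task still falls back to TIF_SVE. */
	printf("host task with TIF_SVE set: %d\n",
	       should_save_sve(FP_STATE_CURRENT, true));
	return 0;
}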
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221115094640.112848-5-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
[ Mark: trivial backport ]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/kernel/fpsimd.c | 40 ++++++++++++++++------------------------
 arch/arm64/kvm/fpsimd.c    |  3 ---
 2 files changed, 16 insertions(+), 27 deletions(-)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 105b8aa0c038..e8f10daaa0d7 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -318,7 +318,13 @@ static void task_fpsimd_load(void)
 
 /*
  * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
- * date with respect to the CPU registers.
+ * date with respect to the CPU registers. Note carefully that the
+ * current context is the context last bound to the CPU stored in
+ * last, if KVM is involved this may be the guest VM context rather
+ * than the host thread for the VM pointed to by current. This means
+ * that we must always reference the state storage via last rather
+ * than via current, if we are saving KVM state then it will have
+ * ensured that the type of registers to save is set in last->to_save.
  */
 static void fpsimd_save(void)
 {
@@ -334,9 +340,15 @@ static void fpsimd_save(void)
 	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
 		return;
 
-	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
-	    test_thread_flag(TIF_SVE)) {
-		if (WARN_ON(sve_get_vl() != last->sve_vl)) {
+	if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE)) ||
+	    last->to_save == FP_STATE_SVE) {
+		save_sve_regs = true;
+		vl = last->sve_vl;
+	}
+
+	if (IS_ENABLED(CONFIG_ARM64_SVE) && save_sve_regs) {
+		/* Get the configured VL from RDVL, will account for SM */
+		if (WARN_ON(sve_get_vl() != vl)) {
 			/*
 			 * Can't save the user regs, so current would
 			 * re-enter user with corrupt state.
@@ -347,26 +359,6 @@ static void fpsimd_save(void)
 		}
 	}
 
-	if (test_thread_flag(TIF_SVE)) {
-		save_sve_regs = true;
-		vl = last->sve_vl;
-	}
-
-	/*
-	 * Validate that an explicitly specified state to save is
-	 * consistent with the task state.
-	 */
-	switch (last->to_save) {
-	case FP_STATE_CURRENT:
-		break;
-	case FP_STATE_FPSIMD:
-		WARN_ON_ONCE(save_sve_regs);
-		break;
-	case FP_STATE_SVE:
-		WARN_ON_ONCE(!save_sve_regs);
-		break;
-	}
-
 	if (IS_ENABLED(CONFIG_ARM64_SVE) && save_sve_regs) {
 		sve_save_state((char *)last->sve_state +
 			       sve_ffr_offset(last->sve_vl),
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 54a31c97eb7a..2e0f44f4c470 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -110,7 +110,6 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
 					 &vcpu->arch.fp_type, fp_type);
 
 		clear_thread_flag(TIF_FOREIGN_FPSTATE);
-		update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
 	}
 }
 
@@ -151,7 +150,5 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
 	}
 
-	update_thread_flag(TIF_SVE, 0);
-
 	local_irq_restore(flags);
 }
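The comment added to fpsimd_save() above is the key point: the state is always referenced via the last-bound record, not via current. A tiny standalone model of that (not kernel code; the struct and function names here are invented for illustration) shows why KVM no longer needs to touch TIF_SVE: once guest state is bound, both the destination storage and the register type to save come from that record, independent of the host thread's flags.

#include <stdio.h>
#include <string.h>

enum fp_state { FP_STATE_FPSIMD, FP_STATE_SVE };

/*
 * Stand-in for the per-CPU record of the context last bound to the CPU.
 * With a guest loaded, storage points at the guest's buffer and to_save
 * says which register set belongs there.
 */
struct last_bound {
	char *storage;
	enum fp_state to_save;
};

static void model_fpsimd_save(const struct last_bound *last)
{
	/* Everything is taken from *last; current's TIF_SVE is irrelevant. */
	if (last->to_save == FP_STATE_SVE)
		strcpy(last->storage, "SVE registers");
	else
		strcpy(last->storage, "FPSIMD registers");
}

int main(void)
{
	char guest_buf[32];
	struct last_bound last = { guest_buf, FP_STATE_SVE };

	model_fpsimd_save(&last);
	printf("saved %s into the guest's buffer, not into current's state\n",
	       guest_buf);
	return 0;
}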