Rather than add earlier prototypes of specific ctxt_has_ helpers, let's just pull all their definitions to the top of sysreg-sr.h so they're all available to all the individual save/restore functions.
Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 82 +++++++++++++++---------
 1 file changed, 40 insertions(+), 42 deletions(-)
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index b9cff893bbe0..b795a7a87a93 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -16,8 +16,6 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
-static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);
-
 static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
 {
 	struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
@@ -28,6 +26,46 @@ static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
 	return vcpu;
 }
 
+static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
+{
+	struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
+
+	return kvm_has_mte(kern_hyp_va(vcpu->kvm));
+}
+
+static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt)
+{
+	struct kvm_vcpu *vcpu;
+
+	if (!cpus_have_final_cap(ARM64_HAS_S1PIE))
+		return false;
+
+	vcpu = ctxt_to_vcpu(ctxt);
+	return kvm_has_s1pie(kern_hyp_va(vcpu->kvm));
+}
+
+static inline bool ctxt_has_tcrx(struct kvm_cpu_context *ctxt)
+{
+	struct kvm_vcpu *vcpu;
+
+	if (!cpus_have_final_cap(ARM64_HAS_TCR2))
+		return false;
+
+	vcpu = ctxt_to_vcpu(ctxt);
+	return kvm_has_tcr2(kern_hyp_va(vcpu->kvm));
+}
+
+static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt)
+{
+	struct kvm_vcpu *vcpu;
+
+	if (!system_supports_poe())
+		return false;
+
+	vcpu = ctxt_to_vcpu(ctxt);
+	return kvm_has_s1poe(kern_hyp_va(vcpu->kvm));
+}
+
 static inline bool ctxt_is_guest(struct kvm_cpu_context *ctxt)
 {
 	return host_data_ptr(host_ctxt) != ctxt;
@@ -69,46 +107,6 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
 	ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
 }
 
-static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
-{
-	struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
-
-	return kvm_has_mte(kern_hyp_va(vcpu->kvm));
-}
-
-static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt)
-{
-	struct kvm_vcpu *vcpu;
-
-	if (!cpus_have_final_cap(ARM64_HAS_S1PIE))
-		return false;
-
-	vcpu = ctxt_to_vcpu(ctxt);
-	return kvm_has_s1pie(kern_hyp_va(vcpu->kvm));
-}
-
-static inline bool ctxt_has_tcrx(struct kvm_cpu_context *ctxt)
-{
-	struct kvm_vcpu *vcpu;
-
-	if (!cpus_have_final_cap(ARM64_HAS_TCR2))
-		return false;
-
-	vcpu = ctxt_to_vcpu(ctxt);
-	return kvm_has_tcr2(kern_hyp_va(vcpu->kvm));
-}
-
-static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt)
-{
-	struct kvm_vcpu *vcpu;
-
-	if (!system_supports_poe())
-		return false;
-
-	vcpu = ctxt_to_vcpu(ctxt);
-	return kvm_has_s1poe(kern_hyp_va(vcpu->kvm));
-}
-
 static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
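
For readers less familiar with why the move is needed: a static inline helper has to be visible before any caller in the same translation unit, so defining the helpers at the top of the header removes the need for ad hoc forward declarations like the old ctxt_has_s1poe() prototype. Below is a minimal standalone sketch of that pattern; the names (struct ctx, ctx_has_feature, save_state) are made up for illustration and are not taken from the patch.

/* Standalone illustration: define the helper before its callers so no
 * forward declaration is required anywhere in the file. */
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	bool feature_present;
};

/* Defined up front; every function further down can call it directly. */
static inline bool ctx_has_feature(const struct ctx *c)
{
	return c->feature_present;
}

/* A later "save" routine uses the helper without needing a prototype. */
static void save_state(const struct ctx *c)
{
	if (ctx_has_feature(c))
		printf("saving feature state\n");
}

int main(void)
{
	struct ctx c = { .feature_present = true };

	save_state(&c);
	return 0;
}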