On Tue, 23 Dec 2025 at 01:22, Mark Brown <broonie@kernel.org> wrote:
Rather than add earlier prototypes of specific ctxt_has_ helpers,
let's just pull all their definitions to the top of sysreg-sr.h so
they're all available to all the individual save/restore functions.
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Fuad Tabba <tabba@google.com>
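For anyone reading along: the prototype being removed only existed
because of C's define-before-use rule for static inline helpers. A
minimal sketch of the before/after shapes, with hypothetical names
(not the actual kernel code):

/* Hypothetical stand-ins for illustration only. */
struct ctx { unsigned long flags; };

/* Before: the caller sits above the helper, so a forward prototype
 * is needed for the call to compile. */
static inline int has_feature(struct ctx *c);

static inline void save_state(struct ctx *c)
{
	if (has_feature(c))
		c->flags |= 1UL;
}

static inline int has_feature(struct ctx *c)
{
	return !!(c->flags & 2UL);
}

/* After: define has_feature() above save_state() and the prototype
 * disappears, which is all this patch does for the ctxt_has_*()
 * helpers. */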
Cheers,
/fuad
---
 arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 84 +++++++++++++++---------
 1 file changed, 41 insertions(+), 43 deletions(-)
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index a17cbe7582de..5624fd705ae3 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -16,8 +16,6 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
-static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);
-
 static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
 {
 	struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
@@ -28,47 +26,6 @@ static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
 	return vcpu;
 }
 
-static inline bool ctxt_is_guest(struct kvm_cpu_context *ctxt)
-{
-	return host_data_ptr(host_ctxt) != ctxt;
-}
-
-static inline u64 *ctxt_mdscr_el1(struct kvm_cpu_context *ctxt)
-{
-	struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
-
-	if (ctxt_is_guest(ctxt) && kvm_host_owns_debug_regs(vcpu))
-		return &vcpu->arch.external_mdscr_el1;
-
-	return &ctxt_sys_reg(ctxt, MDSCR_EL1);
-}
-
-static inline u64 ctxt_midr_el1(struct kvm_cpu_context *ctxt)
-{
-	struct kvm *kvm = kern_hyp_va(ctxt_to_vcpu(ctxt)->kvm);
-
-	if (!(ctxt_is_guest(ctxt) &&
-	      test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags)))
-		return read_cpuid_id();
-
-	return kvm_read_vm_id_reg(kvm, SYS_MIDR_EL1);
-}
-
-static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
-{
-	*ctxt_mdscr_el1(ctxt) = read_sysreg(mdscr_el1);
-
-	// POR_EL0 can affect uaccess, so must be saved/restored early.
-	if (ctxt_has_s1poe(ctxt))
-		ctxt_sys_reg(ctxt, POR_EL0) = read_sysreg_s(SYS_POR_EL0);
-}
-
-static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
-{
-	ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);
-	ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
-}
-
 static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
 {
 	struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
@@ -131,6 +88,47 @@ static inline bool ctxt_has_sctlr2(struct kvm_cpu_context *ctxt)
 	return kvm_has_sctlr2(kern_hyp_va(vcpu->kvm));
 }
 
+static inline bool ctxt_is_guest(struct kvm_cpu_context *ctxt)
+{
+	return host_data_ptr(host_ctxt) != ctxt;
+}
+
+static inline u64 *ctxt_mdscr_el1(struct kvm_cpu_context *ctxt)
+{
+	struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
+
+	if (ctxt_is_guest(ctxt) && kvm_host_owns_debug_regs(vcpu))
+		return &vcpu->arch.external_mdscr_el1;
+
+	return &ctxt_sys_reg(ctxt, MDSCR_EL1);
+}
+
+static inline u64 ctxt_midr_el1(struct kvm_cpu_context *ctxt)
+{
+	struct kvm *kvm = kern_hyp_va(ctxt_to_vcpu(ctxt)->kvm);
+
+	if (!(ctxt_is_guest(ctxt) &&
+	      test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags)))
+		return read_cpuid_id();
+
+	return kvm_read_vm_id_reg(kvm, SYS_MIDR_EL1);
+}
+
+static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
+{
+	*ctxt_mdscr_el1(ctxt) = read_sysreg(mdscr_el1);
+
+	// POR_EL0 can affect uaccess, so must be saved/restored early.
+	if (ctxt_has_s1poe(ctxt))
+		ctxt_sys_reg(ctxt, POR_EL0) = read_sysreg_s(SYS_POR_EL0);
+}
+
+static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
+{
+	ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);
+	ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
+}
+
 static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
--
2.47.3