From: Michael Ellerman <mpe@ellerman.id.au>
[ Upstream commit 95839225639ba7c3d8d7231b542728dcf222bf2d ]
Commit a21d1becaa3f ("powerpc: Reintroduce is_kvm_guest() as a fast-path check") added is_kvm_guest() and changed kvm_para_available() to use it.
is_kvm_guest() checks a static key, kvm_guest, and that static key is set in check_kvm_guest().
The problem is check_kvm_guest() is only called on pseries, and even then only in some configurations. That means is_kvm_guest() always returns false on non-pseries platforms, and on some pseries configurations, even when the kernel is actually running as a KVM guest. That's a bug.
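For reference, the fast path relies on the kernel's static key machinery: the key is declared false and is only flipped when check_kvm_guest() actually runs. A minimal sketch of the two pieces involved (matching the shapes visible in the diff below; surrounding declarations are elided):

	/* arch/powerpc/kernel/firmware.c: the key defaults to false */
	DEFINE_STATIC_KEY_FALSE(kvm_guest);

	/* arch/powerpc/include/asm/kvm_guest.h: a patched-in branch, no load on the hot path */
	static inline bool is_kvm_guest(void)
	{
		return static_branch_unlikely(&kvm_guest);
	}

If check_kvm_guest() is never called, static_branch_enable(&kvm_guest) never runs, so is_kvm_guest() can only ever return false.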
For PR KVM guests this is noticeable because they no longer do live patching of themselves, which can be detected by the omission of a message in dmesg such as:

  KVM: Live patching for a fast VM worked
To fix it, make check_kvm_guest() an initcall, to ensure it's always called at boot. It needs to be a core initcall so that it runs before kvm_guest_init(), which is postcore. To be an initcall it also needs to return int, where 0 means success, so update the return type accordingly.
We still call it manually in pSeries_smp_probe(), because that runs before init calls are run.
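For illustration, the ordering argument in concrete terms: core initcalls run before postcore initcalls, so registering check_kvm_guest() at core level guarantees the static key is set before kvm_guest_init() looks at it. A sketch of the registration side (the check_kvm_guest() body is abbreviated; kvm_guest_init() is assumed to live in arch/powerpc/kernel/kvm.c and to be registered as postcore, as stated above):

	/* arch/powerpc/kernel/firmware.c: initcalls must return int, 0 on success */
	int __init check_kvm_guest(void)
	{
		/* device tree check elided; enables the kvm_guest key when running under KVM */
		return 0;
	}
	core_initcall(check_kvm_guest);

	/* arch/powerpc/kernel/kvm.c: runs at the later postcore level */
	postcore_initcall(kvm_guest_init);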
Fixes: a21d1becaa3f ("powerpc: Reintroduce is_kvm_guest() as a fast-path check")
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210623130514.2543232-1-mpe@ellerman.id.au
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/include/asm/kvm_guest.h |  4 ++--
 arch/powerpc/kernel/firmware.c       | 10 ++++++----
 arch/powerpc/platforms/pseries/smp.c |  4 +++-
 3 files changed, 11 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_guest.h b/arch/powerpc/include/asm/kvm_guest.h
index 2fca299f7e19..c63105d2c9e7 100644
--- a/arch/powerpc/include/asm/kvm_guest.h
+++ b/arch/powerpc/include/asm/kvm_guest.h
@@ -16,10 +16,10 @@ static inline bool is_kvm_guest(void)
 	return static_branch_unlikely(&kvm_guest);
 }
 
-bool check_kvm_guest(void);
+int check_kvm_guest(void);
 #else
 static inline bool is_kvm_guest(void) { return false; }
-static inline bool check_kvm_guest(void) { return false; }
+static inline int check_kvm_guest(void) { return 0; }
 #endif
 
 #endif /* _ASM_POWERPC_KVM_GUEST_H_ */
diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
index c9e2819b095a..c7022c41cc31 100644
--- a/arch/powerpc/kernel/firmware.c
+++ b/arch/powerpc/kernel/firmware.c
@@ -23,18 +23,20 @@ EXPORT_SYMBOL_GPL(powerpc_firmware_features);
 
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
 DEFINE_STATIC_KEY_FALSE(kvm_guest);
-bool check_kvm_guest(void)
+int __init check_kvm_guest(void)
 {
 	struct device_node *hyper_node;
 
 	hyper_node = of_find_node_by_path("/hypervisor");
 	if (!hyper_node)
-		return false;
+		return 0;
 
 	if (!of_device_is_compatible(hyper_node, "linux,kvm"))
-		return false;
+		return 0;
 
 	static_branch_enable(&kvm_guest);
-	return true;
+
+	return 0;
 }
+core_initcall(check_kvm_guest); // before kvm_guest_init()
 #endif
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index c70b4be9f0a5..096629f54576 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -211,7 +211,9 @@ static __init void pSeries_smp_probe(void)
 	if (!cpu_has_feature(CPU_FTR_SMT))
 		return;
 
-	if (check_kvm_guest()) {
+	check_kvm_guest();
+
+	if (is_kvm_guest()) {
 		/*
 		 * KVM emulates doorbells by disabling FSCR[MSGP] so msgsndp
 		 * faults to the hypervisor which then reads the instruction