From: Sean Christopherson <seanjc@google.com>
[ Upstream commit 119b5cb4ffd0166f3e98e9ee042f5046f7744f28 ]
Use KVM VMX's reboot/crash callback to do VMXOFF in an emergency instead of manually and blindly doing VMXOFF. There's no need to attempt VMXOFF if a hypervisor, i.e. KVM, isn't loaded/active, i.e. if the CPU can't possibly be post-VMXON.
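[Editor's note, illustration only and not part of the patch: below is a minimal userspace sketch of the register/invoke pattern this change moves VMXOFF into. Every name in it is a hypothetical stand-in for the kernel symbols, and the RCU protection the real reboot path uses is elided.]

/*
 * Hypothetical stand-ins for the kernel's emergency-virtualization
 * callback: the hypervisor module registers a callback, and the
 * emergency path invokes it only when one is registered, instead of
 * blindly executing VMXOFF.
 */
#include <stdio.h>

typedef void emergency_virt_cb(void);

/* NULL whenever no hypervisor module is loaded. */
static emergency_virt_cb *virt_callback;

static void register_virt_callback(emergency_virt_cb *cb)
{
	virt_callback = cb;
}

static void unregister_virt_callback(void)
{
	virt_callback = NULL;
}

/* Stand-in for vmx_emergency_disable(): VMCLEAR, then VMXOFF. */
static void fake_vmx_emergency_disable(void)
{
	puts("VMCLEAR loaded VMCSes, then VMXOFF");
}

/* Stand-in for cpu_emergency_disable_virtualization(). */
static void emergency_disable_virtualization(void)
{
	/* No callback registered means the CPU can't be post-VMXON. */
	if (virt_callback)
		virt_callback();
}

int main(void)
{
	emergency_disable_virtualization();	/* no-op: nothing registered */
	register_virt_callback(fake_vmx_emergency_disable);
	emergency_disable_virtualization();	/* performs the VMX teardown */
	unregister_virt_callback();
	return 0;
}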
Reviewed-by: Kai Huang <kai.huang@intel.com>
Link: https://lore.kernel.org/r/20230721201859.2307736-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Stable-dep-of: a0ee1d5faff1 ("KVM: VMX: Flush shadow VMCS on emergency reboot")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/include/asm/virtext.h | 10 ----------
 arch/x86/kernel/reboot.c       | 29 +++++++++--------------------
 arch/x86/kvm/vmx/vmx.c         |  8 +++++---
 3 files changed, 14 insertions(+), 33 deletions(-)
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 724ce44809ed2..1b683bf71d14f 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -70,16 +70,6 @@ static inline void __cpu_emergency_vmxoff(void)
 		cpu_vmxoff();
 }
 
-/** Disable VMX if it is supported and enabled on the current CPU
- */
-static inline void cpu_emergency_vmxoff(void)
-{
-	if (cpu_has_vmx())
-		__cpu_emergency_vmxoff();
-}
-
-
-
 /*
  * SVM functions:
  */
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 635995e7a704a..79e1ac3d0625d 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -787,13 +787,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
 }
 #endif
 
-/*
- * This is used to VMCLEAR all VMCSs loaded on the
- * processor. And when loading kvm_intel module, the
- * callback function pointer will be assigned.
- *
- * protected by rcu.
- */
+/* RCU-protected callback to disable virtualization prior to reboot. */
 static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback;
 
 void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback)
@@ -815,17 +809,6 @@ void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback)
 }
 EXPORT_SYMBOL_GPL(cpu_emergency_unregister_virt_callback);
 
-static inline void cpu_crash_vmclear_loaded_vmcss(void)
-{
-	cpu_emergency_virt_cb *callback;
-
-	rcu_read_lock();
-	callback = rcu_dereference(cpu_emergency_virt_callback);
-	if (callback)
-		callback();
-	rcu_read_unlock();
-}
-
 /* This is the CPU performing the emergency shutdown work. */
 int crashing_cpu = -1;
 
@@ -836,9 +819,15 @@ int crashing_cpu = -1;
  */
 void cpu_emergency_disable_virtualization(void)
 {
-	cpu_crash_vmclear_loaded_vmcss();
+	cpu_emergency_virt_cb *callback;
+
+	rcu_read_lock();
+	callback = rcu_dereference(cpu_emergency_virt_callback);
+	if (callback)
+		callback();
+	rcu_read_unlock();
 
-	cpu_emergency_vmxoff();
+	/* KVM_AMD doesn't yet utilize the common callback. */
 	cpu_emergency_svm_disable();
 }
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index aef2f09718b57..ef9cb8445dc48 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -705,7 +705,7 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
 	return ret;
 }
 
-static void crash_vmclear_local_loaded_vmcss(void)
+static void vmx_emergency_disable(void)
 {
 	int cpu = raw_smp_processor_id();
 	struct loaded_vmcs *v;
@@ -713,6 +713,8 @@ static void crash_vmclear_local_loaded_vmcss(void)
 	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
 			    loaded_vmcss_on_cpu_link)
 		vmcs_clear(v->vmcs);
+
+	__cpu_emergency_vmxoff();
 }
 
 static void __loaded_vmcs_clear(void *arg)
@@ -8554,7 +8556,7 @@ static void __vmx_exit(void)
 {
 	allow_smaller_maxphyaddr = false;
 
-	cpu_emergency_unregister_virt_callback(crash_vmclear_local_loaded_vmcss);
+	cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
 
 	vmx_cleanup_l1d_flush();
 }
@@ -8628,7 +8630,7 @@ static int __init vmx_init(void)
 		pi_init_cpu(cpu);
 	}
 
-	cpu_emergency_register_virt_callback(crash_vmclear_local_loaded_vmcss);
+	cpu_emergency_register_virt_callback(vmx_emergency_disable);
 
 	vmx_check_vmcs12_offsets();