From: Josh Poimboeuf <jpoimboe@kernel.org>
commit fc02735b14fff8c6678b521d324ade27b1a3d4cf upstream.
On eIBRS systems, the returns in the vmexit return path from __vmx_vcpu_run() to vmx_vcpu_run() are exposed to RSB poisoning attacks.
Fix that by moving the post-vmexit spec_ctrl handling to immediately after the vmexit.
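Roughly, the flow change looks like this (an illustrative sketch only, not
compilable; the angle-bracket placeholders and elided statements are mine,
while the helper names match the diff below):

    /* Before: unbalanced RETs executed with the guest's SPEC_CTRL value
     * still live and a possibly guest-poisoned RSB:
     */
    <vmexit inline asm>
    ... C code and function calls (exposed RETs) ...
    x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
    vmexit_fill_RSB();

    /* After: fill the RSB and restore the host SPEC_CTRL value first;
     * both helpers are __always_inline, so they emit no CALL/RET of
     * their own:
     */
    <vmexit inline asm>
    vmexit_fill_RSB();
    spec_ctrl = vmx_spec_ctrl_restore_host(vmx);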
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
[ bp: Adjust for the fact that vmexit is in inline assembly ]
Signed-off-by: Suraj Jitindar Singh <surajjs@amazon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Suleiman Souhlal <suleiman@google.com>
---
 arch/x86/include/asm/nospec-branch.h |  3 +-
 arch/x86/kernel/cpu/bugs.c           |  4 +++
 arch/x86/kvm/vmx.c                   | 46 ++++++++++++++++++++++++----
 3 files changed, 46 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index c990c3b2ada5..3e1d2389c00d 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -271,7 +271,7 @@ extern char __indirect_thunk_end[];
  * retpoline and IBRS mitigations for Spectre v2 need this; only on future
  * CPUs with IBRS_ALL *might* it be avoided.
  */
-static inline void vmexit_fill_RSB(void)
+static __always_inline void vmexit_fill_RSB(void)
 {
 #ifdef CONFIG_RETPOLINE
 	unsigned long loops;
@@ -306,6 +306,7 @@ static inline void indirect_branch_prediction_barrier(void)
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
+extern u64 x86_spec_ctrl_current;
 extern void write_spec_ctrl_current(u64 val, bool force);
 extern u64 spec_ctrl_current(void);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 95d8b517cf4d..022b9f31333f 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -185,6 +185,10 @@ void __init check_bugs(void)
 #endif
 }
+/*
+ * NOTE: For VMX, this function is not called in the vmexit path.
+ * It uses vmx_spec_ctrl_restore_host() instead.
+ */
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 952a58cad25f..951cec231e7f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10760,10 +10760,31 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 	vmx->loaded_vmcs->hv_timer_armed = false;
 }
 
+u64 __always_inline vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx)
+{
+	u64 guestval, hostval = this_cpu_read(x86_spec_ctrl_current);
+
+	if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
+		return 0;
+
+	guestval = __rdmsr(MSR_IA32_SPEC_CTRL);
+
+	/*
+	 * If the guest/host SPEC_CTRL values differ, restore the host value.
+	 */
+	if (guestval != hostval)
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
+
+	barrier_nospec();
+
+	return guestval;
+}
+
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long cr3, cr4, evmcs_rsp;
+	u64 spec_ctrl;
 	/* Record the guest's net vcpu time for enforced NMI injections. */
 	if (unlikely(!enable_vnmi &&
@@ -10989,6 +11010,24 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 	      );
+	/*
+	 * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
+	 * the first unbalanced RET after vmexit!
+	 *
+	 * For retpoline, RSB filling is needed to prevent poisoned RSB entries
+	 * and (in some cases) RSB underflow.
+	 *
+	 * eIBRS has its own protection against poisoned RSB, so it doesn't
+	 * need the RSB filling sequence.  But it does need to be enabled
+	 * before the first unbalanced RET.
+	 *
+	 * So no RETs before vmx_spec_ctrl_restore_host() below.
+	 */
+	vmexit_fill_RSB();
+
+	/* Save this for below */
+	spec_ctrl = vmx_spec_ctrl_restore_host(vmx);
+
 	vmx_enable_fb_clear(vmx);
 	/*
@@ -11007,12 +11046,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * save it.
 	 */
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
-		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
-
-	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
-
-	/* Eliminate branch target predictions from guest mode */
-	vmexit_fill_RSB();
+		vmx->spec_ctrl = spec_ctrl;
 	/* All fields are clean at this point */
 	if (static_branch_unlikely(&enable_evmcs))