svm_update_lbrv() always updates the LBR MSR intercepts, even when they are already set correctly. This results in force_msr_bitmap_recalc always being set to true on every nested transition, essentially undoing the Hyper-V optimization in nested_svm_merge_msrpm().
Fix it by keeping track of whether LBR MSRs are intercepted or not and only doing the update if needed, similar to x2avic_msrs_intercepted.
Avoid using svm_test_msr_bitmap_*() to check the status of the intercepts, as an arbitrary MSR would need to be chosen as a representative of all LBR MSRs, and this could theoretically break if some of the MSR intercepts are handled differently from the rest.

Also, using svm_test_msr_bitmap_*() makes backports difficult, as it was only recently introduced and has no direct alternative in older kernels.
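For illustration only (not part of the patch), the rejected alternative would look roughly like the sketch below. The helper name and signature are assumptions based on the changelog, and MSR_IA32_LASTBRANCHFROMIP is arbitrarily picked to stand in for all of the LBR MSRs:

	/*
	 * Hypothetical sketch of the approach argued against above: probe the
	 * MSR permission bitmap for one "representative" LBR MSR instead of
	 * tracking the state in struct vcpu_svm.
	 */
	static bool svm_lbr_msrs_currently_intercepted(struct vcpu_svm *svm)
	{
		return svm_test_msr_bitmap_read(svm->msrpm, MSR_IA32_LASTBRANCHFROMIP);
	}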
Fixes: fbe5e5f030c2 ("KVM: nSVM: Always recalculate LBR MSR intercepts in svm_update_lbrv()")
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/svm.c | 9 ++++++++-
 arch/x86/kvm/svm/svm.h | 1 +
 2 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 10c21e4c5406f..9d29b2e7e855d 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -705,7 +705,11 @@ void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu) { - bool intercept = !(to_svm(vcpu)->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK); + struct vcpu_svm *svm = to_svm(vcpu); + bool intercept = !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK); + + if (intercept == svm->lbr_msrs_intercepted) + return;
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHFROMIP, MSR_TYPE_RW, intercept); svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHTOIP, MSR_TYPE_RW, intercept); @@ -714,6 +718,8 @@ static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
if (sev_es_guest(vcpu->kvm)) svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept); + + svm->lbr_msrs_intercepted = intercept; }
void svm_vcpu_free_msrpm(void *msrpm) @@ -1221,6 +1227,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) }
svm->x2avic_msrs_intercepted = true; + svm->lbr_msrs_intercepted = true;
svm->vmcb01.ptr = page_address(vmcb01_page); svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index c856d8e0f95e7..dd78e64023450 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -336,6 +336,7 @@ struct vcpu_svm { bool guest_state_loaded;
bool x2avic_msrs_intercepted; + bool lbr_msrs_intercepted;
/* Guest GIF value, used when vGIF is not enabled */ bool guest_gif;
base-commit: 8a4821412cf2c1429fffa07c012dd150f2edf78c
In preparation for using svm_copy_lbrs() with 'struct vmcb_save_area' without a containing 'struct vmcb', and later even 'struct vmcb_save_area_cached', make it a macro. Pull the call to vmcb_mark_dirty() out to the callers.
Macros are generally frowned upon compared to functions, mainly due to the lack of type safety. However, in this case it seems like having a simple macro copying a few fields is better than copy-pasting the same 5 lines of code in different places.
On the bright side, pulling vmcb_mark_dirty() calls to the callers makes it clear that in one case, vmcb_mark_dirty() was being called on VMCB12. It is not architecturally defined for the CPU to clear arbitrary clean bits, and it is not needed, so drop that one call.
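(For reference, and not part of this patch: KVM's vmcb_mark_dirty() essentially just clears the corresponding clean bit, roughly as below, so calling it on VMCB12 clears a clean bit in guest-owned state, which is not something the architecture defines the CPU to do.)

	/* Simplified sketch of the existing helper in arch/x86/kvm/svm/svm.h. */
	static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
	{
		vmcb->control.clean &= ~(1 << bit);
	}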
Technically, this fixes the non-architectural behavior of clearing clean bits in VMCB12.
Fixes: d20c796ca370 ("KVM: x86: nSVM: implement nested LBR virtualization")
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 16 ++++++++++------
 arch/x86/kvm/svm/svm.c    | 11 -----------
 arch/x86/kvm/svm/svm.h    | 10 +++++++++-
 3 files changed, 19 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index ba0f11c68372..8e157ffbf4b1 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -676,10 +676,12 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12 * Reserved bits of DEBUGCTL are ignored. Be consistent with * svm_set_msr's definition of reserved bits. */ - svm_copy_lbrs(vmcb02, vmcb12); + svm_copy_lbrs(&vmcb02->save, &vmcb12->save); + vmcb_mark_dirty(vmcb02, VMCB_LBR); vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS; } else { - svm_copy_lbrs(vmcb02, vmcb01); + svm_copy_lbrs(&vmcb02->save, &vmcb01->save); + vmcb_mark_dirty(vmcb02, VMCB_LBR); } svm_update_lbrv(&svm->vcpu); } @@ -1186,10 +1188,12 @@ int nested_svm_vmexit(struct vcpu_svm *svm) kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
if (unlikely(guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) && - (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) - svm_copy_lbrs(vmcb12, vmcb02); - else - svm_copy_lbrs(vmcb01, vmcb02); + (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) { + svm_copy_lbrs(&vmcb12->save, &vmcb02->save); + } else { + svm_copy_lbrs(&vmcb01->save, &vmcb02->save); + vmcb_mark_dirty(vmcb01, VMCB_LBR); + }
svm_update_lbrv(vcpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 24d59ccfa40d..2428b772546f 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -804,17 +804,6 @@ static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu) */ }
-void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb) -{ - to_vmcb->save.dbgctl = from_vmcb->save.dbgctl; - to_vmcb->save.br_from = from_vmcb->save.br_from; - to_vmcb->save.br_to = from_vmcb->save.br_to; - to_vmcb->save.last_excp_from = from_vmcb->save.last_excp_from; - to_vmcb->save.last_excp_to = from_vmcb->save.last_excp_to; - - vmcb_mark_dirty(to_vmcb, VMCB_LBR); -} - static void __svm_enable_lbrv(struct kvm_vcpu *vcpu) { to_svm(vcpu)->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 01be93a53d07..8a642ab2936a 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -685,8 +685,16 @@ static inline void *svm_vcpu_alloc_msrpm(void) return svm_alloc_permissions_map(MSRPM_SIZE, GFP_KERNEL_ACCOUNT); }
+#define svm_copy_lbrs(to, from) \ +({ \ + (to)->dbgctl = (from)->dbgctl; \ + (to)->br_from = (from)->br_from; \ + (to)->br_to = (from)->br_to; \ + (to)->last_excp_from = (from)->last_excp_from; \ + (to)->last_excp_to = (from)->last_excp_to; \ +}) + void svm_vcpu_free_msrpm(void *msrpm); -void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb); void svm_enable_lbrv(struct kvm_vcpu *vcpu); void svm_update_lbrv(struct kvm_vcpu *vcpu);
MSR_IA32_DEBUGCTLMSR and the LBR MSRs are currently not enumerated by KVM_GET_MSR_INDEX_LIST, and the LBR MSRs cannot be set with KVM_SET_MSRS, so save/restore of these MSRs is completely broken.
Fix it by adding the MSRs to msrs_to_save_base, and allowing writes to LBR MSRs from userspace only (as they are read-only MSRs). Additionally, to correctly restore L1's LBRs while L2 is running, make sure the LBRs are copied from the captured VMCB01 save area in svm_copy_vmrun_state().
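For illustration only (not part of the patch), a minimal userspace sketch of what save/restore of these MSRs could look like once they are enumerated; the raw MSR indices, the vcpu_fd handling, and the lack of error checking are all simplifications:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	#define NR_LBR_MSRS 5

	/* Raw indices of the MSRs added to msrs_to_save_base above. */
	static const __u32 lbr_msr_indices[NR_LBR_MSRS] = {
		0x1d9,	/* MSR_IA32_DEBUGCTLMSR */
		0x1db,	/* MSR_IA32_LASTBRANCHFROMIP */
		0x1dc,	/* MSR_IA32_LASTBRANCHTOIP */
		0x1dd,	/* MSR_IA32_LASTINTFROMIP */
		0x1de,	/* MSR_IA32_LASTINTTOIP */
	};

	/* struct kvm_msrs ends in a flexible array, so provide storage for the entries. */
	static struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entries[NR_LBR_MSRS];
	} lbr_state;

	/* Read the current values from the source vCPU. */
	static void save_lbrs(int vcpu_fd)
	{
		memset(&lbr_state, 0, sizeof(lbr_state));
		lbr_state.hdr.nmsrs = NR_LBR_MSRS;
		for (int i = 0; i < NR_LBR_MSRS; i++)
			lbr_state.entries[i].index = lbr_msr_indices[i];
		ioctl(vcpu_fd, KVM_GET_MSRS, &lbr_state);
	}

	/* Write the values to the destination vCPU; host-initiated writes are now allowed. */
	static void restore_lbrs(int vcpu_fd)
	{
		ioctl(vcpu_fd, KVM_SET_MSRS, &lbr_state);
	}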
Fixes: 24e09cbf480a ("KVM: SVM: enable LBR virtualization")
Cc: stable@vger.kernel.org
Reported-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c |  3 +++
 arch/x86/kvm/svm/svm.c    | 20 ++++++++++++++++++++
 arch/x86/kvm/x86.c        |  3 +++
 3 files changed, 26 insertions(+)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 8e157ffbf4b1..53b149dbc930 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -1057,6 +1057,9 @@ void svm_copy_vmrun_state(struct vmcb_save_area *to_save, to_save->isst_addr = from_save->isst_addr; to_save->ssp = from_save->ssp; } + + if (lbrv) + svm_copy_lbrs(to_save, from_save); }
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 2428b772546f..2bfc46f22485 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2999,6 +2999,26 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) vmcb_mark_dirty(svm->vmcb, VMCB_LBR); svm_update_lbrv(vcpu); break; + case MSR_IA32_LASTBRANCHFROMIP: + if (!msr->host_initiated) + return 1; + svm->vmcb->save.br_from = data; + break; + case MSR_IA32_LASTBRANCHTOIP: + if (!msr->host_initiated) + return 1; + svm->vmcb->save.br_to = data; + break; + case MSR_IA32_LASTINTFROMIP: + if (!msr->host_initiated) + return 1; + svm->vmcb->save.last_excp_from = data; + break; + case MSR_IA32_LASTINTTOIP: + if (!msr->host_initiated) + return 1; + svm->vmcb->save.last_excp_to = data; + break; case MSR_VM_HSAVE_PA: /* * Old kernels did not validate the value written to diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ff8812f3a129..b3d4a8d06689 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -345,6 +345,9 @@ static const u32 msrs_to_save_base[] = { MSR_IA32_U_CET, MSR_IA32_S_CET, MSR_IA32_PL0_SSP, MSR_IA32_PL1_SSP, MSR_IA32_PL2_SSP, MSR_IA32_PL3_SSP, MSR_IA32_INT_SSP_TAB, + MSR_IA32_DEBUGCTLMSR, + MSR_IA32_LASTBRANCHFROMIP, MSR_IA32_LASTBRANCHTOIP, + MSR_IA32_LASTINTFROMIP, MSR_IA32_LASTINTTOIP, };
static const u32 msrs_to_save_pmu[] = {
nested_svm_vmrun() currently only injects a #GP if kvm_vcpu_map() fails with -EINVAL, but it could also fail with -EFAULT if creating a host mapping fails. Inject a #GP in all cases; there is no reason to treat the failure modes differently.
Fixes: 8c5fbf1a7231 ("KVM/nSVM: Use the new mapping API for mapping guest memory")
CC: stable@vger.kernel.org
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 53b149dbc930..a5a367fd8bb1 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -966,12 +966,9 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) }
vmcb12_gpa = svm->vmcb->save.rax; - ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map); - if (ret == -EINVAL) { + if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map)) { kvm_inject_gp(vcpu, 0); return 1; - } else if (ret) { - return kvm_skip_emulated_instruction(vcpu); }
ret = kvm_skip_emulated_instruction(vcpu);
KVM currently injects a #GP and hopes for the best if mapping VMCB12 fails on nested #VMEXIT, and only if the failure mode is -EINVAL. Mapping the VMCB12 could also fail if creating host mappings fails.
After the #GP is injected, nested_svm_vmexit() bails early, without cleaning up (e.g. KVM_REQ_GET_NESTED_STATE_PAGES is set, is_guest_mode() is true, etc). Move mapping VMCB12 a bit later, after leaving guest mode and clearing KVM_REQ_GET_NESTED_STATE_PAGES, right before the VMCB12 is actually used.
Instead of optionally injecting a #GP, triple fault the guest if mapping VMCB12 fails since KVM cannot make a sane recovery. The APM states that a #VMEXIT will triple fault if host state is illegal or an exception occurs while loading host state, so the behavior is not entirely made up.
Also update the WARN_ON() in svm_get_nested_state_pages() to WARN_ON_ONCE() to avoid future user-triggerable bugs spamming the kernel logs and potentially causing issues.
Fixes: cf74a78b229d ("KVM: SVM: Add VMEXIT handler and intercepts")
CC: stable@vger.kernel.org
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 25 +++++++++++--------------
 1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index a5a367fd8bb1..e6b87ae46783 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -1078,24 +1078,14 @@ void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb) int nested_svm_vmexit(struct vcpu_svm *svm) { struct kvm_vcpu *vcpu = &svm->vcpu; + gpa_t vmcb12_gpa = svm->nested.vmcb12_gpa; struct vmcb *vmcb01 = svm->vmcb01.ptr; struct vmcb *vmcb02 = svm->nested.vmcb02.ptr; struct vmcb *vmcb12; struct kvm_host_map map; - int rc; - - rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map); - if (rc) { - if (rc == -EINVAL) - kvm_inject_gp(vcpu, 0); - return 1; - } - - vmcb12 = map.hva;
/* Exit Guest-Mode */ leave_guest_mode(vcpu); - svm->nested.vmcb12_gpa = 0; WARN_ON_ONCE(svm->nested.nested_run_pending);
kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); @@ -1103,8 +1093,16 @@ int nested_svm_vmexit(struct vcpu_svm *svm) /* in case we halted in L2 */ kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
+ svm->nested.vmcb12_gpa = 0; + + if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map)) { + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + return 1; + } + /* Give the current vmcb to the guest */
+ vmcb12 = map.hva; vmcb12->save.es = vmcb02->save.es; vmcb12->save.cs = vmcb02->save.cs; vmcb12->save.ss = vmcb02->save.ss; @@ -1259,8 +1257,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
nested_svm_uninit_mmu_context(vcpu);
- rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true); - if (rc) + if (nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true)) return 1;
/* @@ -1893,7 +1890,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu) { - if (WARN_ON(!is_guest_mode(vcpu))) + if (WARN_ON_ONCE(!is_guest_mode(vcpu))) return true;
if (!vcpu->arch.pdptrs_from_userspace &&
If loading L1's CR3 fails on a nested #VMEXIT, nested_svm_vmexit() returns an error code that is ignored by most callers, and continues to run L1 with corrupted state. A sane recovery is not possible in this case, and HW behavior is to cause a shutdown. Inject a triple fault instead.
From the APM: Upon #VMEXIT, the processor performs the following actions in order to return to the host execution context:
    ...
    if (illegal host state loaded, or exception while loading host state)
        shutdown
    else
        execute first host instruction following the VMRUN
Remove the return value of nested_svm_vmexit(), which is mostly unchecked anyway.
Fixes: d82aaef9c88a ("KVM: nSVM: use nested_svm_load_cr3() on guest->host switch")
CC: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 12 ++++++------
 arch/x86/kvm/svm/svm.c    | 11 ++---------
 arch/x86/kvm/svm/svm.h    |  6 +++---
 3 files changed, 11 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index e6b87ae46783..9500dd87d7a0 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -1075,7 +1075,7 @@ void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb) to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip; }
-int nested_svm_vmexit(struct vcpu_svm *svm) +void nested_svm_vmexit(struct vcpu_svm *svm) { struct kvm_vcpu *vcpu = &svm->vcpu; gpa_t vmcb12_gpa = svm->nested.vmcb12_gpa; @@ -1097,7 +1097,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map)) { kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); - return 1; + return; }
/* Give the current vmcb to the guest */ @@ -1257,8 +1257,10 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
nested_svm_uninit_mmu_context(vcpu);
- if (nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true)) - return 1; + if (nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true)) { + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + return; + }
/* * Drop what we picked up for L2 via svm_complete_interrupts() so it @@ -1283,8 +1285,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm) */ if (kvm_apicv_activated(vcpu->kvm)) __kvm_vcpu_update_apicv(vcpu); - - return 0; }
static void nested_svm_triple_fault(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 2bfc46f22485..a2c6d7e0b8ce 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2161,13 +2161,9 @@ static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) [SVM_INSTR_VMSAVE] = vmsave_interception, }; struct vcpu_svm *svm = to_svm(vcpu); - int ret;
if (is_guest_mode(vcpu)) { - /* Returns '1' or -errno on failure, '0' on success. */ - ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]); - if (ret) - return ret; + nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]); return 1; } return svm_instr_handlers[opcode](vcpu); @@ -4689,7 +4685,6 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) { struct vcpu_svm *svm = to_svm(vcpu); struct kvm_host_map map_save; - int ret;
if (!is_guest_mode(vcpu)) return 0; @@ -4709,9 +4704,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
- ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW); - if (ret) - return ret; + nested_svm_simple_vmexit(svm, SVM_EXIT_SW);
/* * KVM uses VMCB01 to store L1 host state while L2 runs but diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 8a642ab2936a..9aa60924623f 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -765,15 +765,15 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu); void svm_copy_vmrun_state(struct vmcb_save_area *to_save, struct vmcb_save_area *from_save); void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb); -int nested_svm_vmexit(struct vcpu_svm *svm); +void nested_svm_vmexit(struct vcpu_svm *svm);
-static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code) +static inline void nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code) { svm->vmcb->control.exit_code = exit_code; svm->vmcb->control.exit_code_hi = 0; svm->vmcb->control.exit_info_1 = 0; svm->vmcb->control.exit_info_2 = 0; - return nested_svm_vmexit(svm); + nested_svm_vmexit(svm); }
int nested_svm_exit_handled(struct vcpu_svm *svm);
The wrappers provide little value and make it harder to see what KVM is checking in the normal flow. Drop them.
Opportunistically fixup comments referring to the functions, adding '()' to make it clear it's a reference to a function.
No functional change intended.
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 36 ++++++++++--------------------------
 1 file changed, 10 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 9500dd87d7a0..512e377cfdec 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -324,8 +324,8 @@ static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size) kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1); }
-static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu, - struct vmcb_ctrl_area_cached *control) +static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, + struct vmcb_ctrl_area_cached *control) { if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN))) return false; @@ -352,8 +352,8 @@ static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu, }
/* Common checks that apply to both L1 and L2 state. */ -static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu, - struct vmcb_save_area_cached *save) +static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu, + struct vmcb_save_area_cached *save) { if (CC(!(save->efer & EFER_SVME))) return false; @@ -387,22 +387,6 @@ static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu, return true; }
-static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu) -{ - struct vcpu_svm *svm = to_svm(vcpu); - struct vmcb_save_area_cached *save = &svm->nested.save; - - return __nested_vmcb_check_save(vcpu, save); -} - -static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu) -{ - struct vcpu_svm *svm = to_svm(vcpu); - struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl; - - return __nested_vmcb_check_controls(vcpu, ctl); -} - static void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu, struct vmcb_ctrl_area_cached *to, @@ -435,7 +419,7 @@ void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu, to->pause_filter_count = from->pause_filter_count; to->pause_filter_thresh = from->pause_filter_thresh;
- /* Copy asid here because nested_vmcb_check_controls will check it. */ + /* Copy asid here because nested_vmcb_check_controls() will check it */ to->asid = from->asid; to->msrpm_base_pa &= ~0x0fffULL; to->iopm_base_pa &= ~0x0fffULL; @@ -981,8 +965,8 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
- if (!nested_vmcb_check_save(vcpu) || - !nested_vmcb_check_controls(vcpu)) { + if (!nested_vmcb_check_save(vcpu, &svm->nested.save) || + !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) { vmcb12->control.exit_code = SVM_EXIT_ERR; vmcb12->control.exit_code_hi = -1u; vmcb12->control.exit_info_1 = 0; @@ -1817,12 +1801,12 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
ret = -EINVAL; __nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl); - if (!__nested_vmcb_check_controls(vcpu, &ctl_cached)) + if (!nested_vmcb_check_controls(vcpu, &ctl_cached)) goto out_free;
/* * Processor state contains L2 state. Check that it is - * valid for guest mode (see nested_vmcb_check_save). + * valid for guest mode (see nested_vmcb_check_save()). */ cr0 = kvm_read_cr0(vcpu); if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW)) @@ -1836,7 +1820,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, if (!(save->cr0 & X86_CR0_PG) || !(save->cr0 & X86_CR0_PE) || (save->rflags & X86_EFLAGS_VM) || - !__nested_vmcb_check_save(vcpu, &save_cached)) + !nested_vmcb_check_save(vcpu, &save_cached)) goto out_free;
In preparation for moving more changes that rely on is_guest_mode() before switching to VMCB02, move entering guest mode a bit earlier.
Nothing between the new callsite(s) and the old ones relies on is_guest_mode(), so this should be safe.
No functional change intended.
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 512e377cfdec..384352365310 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -709,9 +709,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
nested_svm_transition_tlb_flush(vcpu);
- /* Enter Guest-Mode */ - enter_guest_mode(vcpu); - /* * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2, * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes. @@ -899,6 +896,8 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
+ enter_guest_mode(vcpu); + nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
svm_switch_vmcb(svm, &svm->nested.vmcb02); @@ -1846,6 +1845,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save); nested_copy_vmcb_control_to_cache(svm, ctl);
+ enter_guest_mode(vcpu); svm_switch_vmcb(svm, &svm->nested.vmcb02); nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
In preparation for moving nested_svm_merge_msrpm() within enter_svm_guest_mode(), which returns an errno, return an errno from nested_svm_merge_msrpm().
No functional change intended.
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 384352365310..f46e97008492 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -261,7 +261,7 @@ int __init nested_svm_init_msrpm_merge_offsets(void) * is optimized in that it only merges the parts where KVM MSR permission bitmap * may contain zero bits. */ -static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu) +static int nested_svm_merge_msrpm(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); nsvm_msrpm_merge_t *msrpm02 = svm->nested.msrpm; @@ -288,17 +288,19 @@ static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu) #endif
if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) - return true; + return 0;
for (i = 0; i < nested_svm_nr_msrpm_merge_offsets; i++) { const int p = nested_svm_msrpm_merge_offsets[i]; nsvm_msrpm_merge_t l1_val; gpa_t gpa; + int r;
gpa = svm->nested.ctl.msrpm_base_pa + (p * sizeof(l1_val));
- if (kvm_vcpu_read_guest(vcpu, gpa, &l1_val, sizeof(l1_val))) - return false; + r = kvm_vcpu_read_guest(vcpu, gpa, &l1_val, sizeof(l1_val)); + if (r) + return r;
msrpm02[p] = msrpm01[p] | l1_val; } @@ -310,7 +312,7 @@ static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu) #endif svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
- return true; + return 0; }
/* @@ -991,7 +993,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true)) goto out_exit_err;
- if (nested_svm_merge_msrpm(vcpu)) + if (!nested_svm_merge_msrpm(vcpu)) goto out;
out_exit_err: @@ -1887,7 +1889,7 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu) if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3))) return false;
- if (!nested_svm_merge_msrpm(vcpu)) { + if (nested_svm_merge_msrpm(vcpu)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
In preparation for unifying the VMRUN failure code paths, move calling nested_svm_merge_msrpm() into enter_svm_guest_mode() next to the nested_svm_load_cr3() call (the other failure path in enter_svm_guest_mode()).
Adding more uses of the from_vmrun parameter is not pretty, but it is plumbed all the way to nested_svm_load_cr3() so it's not going away soon anyway.
No functional change intended.
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index f46e97008492..2ee9d8bef5ba 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -911,6 +911,12 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, if (ret) return ret;
+ if (from_vmrun) { + ret = nested_svm_merge_msrpm(vcpu); + if (ret) + return ret; + } + if (!from_vmrun) kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
@@ -990,23 +996,18 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
svm->nested.nested_run_pending = 1;
- if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true)) - goto out_exit_err; - - if (!nested_svm_merge_msrpm(vcpu)) - goto out; - -out_exit_err: - svm->nested.nested_run_pending = 0; - svm->nmi_l1_to_l2 = false; - svm->soft_int_injected = false; + if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true)) { + svm->nested.nested_run_pending = 0; + svm->nmi_l1_to_l2 = false; + svm->soft_int_injected = false;
- svm->vmcb->control.exit_code = SVM_EXIT_ERR; - svm->vmcb->control.exit_code_hi = -1u; - svm->vmcb->control.exit_info_1 = 0; - svm->vmcb->control.exit_info_2 = 0; + svm->vmcb->control.exit_code = SVM_EXIT_ERR; + svm->vmcb->control.exit_code_hi = -1u; + svm->vmcb->control.exit_info_1 = 0; + svm->vmcb->control.exit_info_2 = 0;
- nested_svm_vmexit(svm); + nested_svm_vmexit(svm); + }
out: kvm_vcpu_unmap(vcpu, &map);
In preparation for moving more code that depends on nested_svm_init_mmu_context() before switching to VMCB02, move the call outside of nested_vmcb02_prepare_control() into callers, a bit earlier. nested_svm_init_mmu_context() needs to be called after enter_guest_mode(), but not after switching to VMCB02.
No functional change intended.
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 2ee9d8bef5ba..4781acfa3504 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -771,10 +771,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, /* Also overwritten later if necessary. */ vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
- /* nested_cr3. */ - if (nested_npt_enabled(svm)) - nested_svm_init_mmu_context(vcpu); - vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( vcpu->arch.l1_tsc_offset, svm->nested.ctl.tsc_offset, @@ -900,6 +896,9 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
enter_guest_mode(vcpu);
+ if (nested_npt_enabled(svm)) + nested_svm_init_mmu_context(vcpu); + nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
svm_switch_vmcb(svm, &svm->nested.vmcb02); @@ -1849,6 +1848,10 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, nested_copy_vmcb_control_to_cache(svm, ctl);
enter_guest_mode(vcpu); + + if (nested_npt_enabled(svm)) + nested_svm_init_mmu_context(vcpu); + svm_switch_vmcb(svm, &svm->nested.vmcb02); nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
In preparation for having a separate minimal #VMEXIT path for handling failed VMRUNs, move the minimal logic out of nested_svm_vmexit() into a helper.
This includes clearing the GIF, handling single-stepping on VMRUN, and a few data structure cleanups. Basically, everything that is required by the architecture (or KVM) on a #VMEXIT where L2 never actually ran.
Additionally move uninitializing the nested MMU and reloading host CR3 to the new helper. It is not required at this point, but following changes will require it.
No functional change intended.
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 61 ++++++++++++++++++++++-----------------
 1 file changed, 34 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 4781acfa3504..1356bd6383ca 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -929,6 +929,34 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, return 0; }
+static void __nested_svm_vmexit(struct vcpu_svm *svm) +{ + struct vmcb *vmcb01 = svm->vmcb01.ptr; + struct kvm_vcpu *vcpu = &svm->vcpu; + + svm->nested.vmcb12_gpa = 0; + svm->nested.ctl.nested_cr3 = 0; + + /* GIF is cleared on #VMEXIT, no event can be injected in L1 */ + svm_set_gif(svm, false); + vmcb01->control.exit_int_info = 0; + + nested_svm_uninit_mmu_context(vcpu); + if (nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true)) { + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + return; + } + + /* + * If we are here following the completion of a VMRUN that + * is being single-stepped, queue the pending #DB intercept + * right now so that it an be accounted for before we execute + * L1's next instruction. + */ + if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF)) + kvm_queue_exception(vcpu, DB_VECTOR); +} + int nested_svm_vmrun(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -1078,8 +1106,6 @@ void nested_svm_vmexit(struct vcpu_svm *svm) /* in case we halted in L2 */ kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
- svm->nested.vmcb12_gpa = 0; - if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map)) { kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; @@ -1194,13 +1220,6 @@ void nested_svm_vmexit(struct vcpu_svm *svm) } }
- /* - * On vmexit the GIF is set to false and - * no event can be injected in L1. - */ - svm_set_gif(svm, false); - vmcb01->control.exit_int_info = 0; - svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset; if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) { vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset; @@ -1213,8 +1232,6 @@ void nested_svm_vmexit(struct vcpu_svm *svm) svm_write_tsc_multiplier(vcpu); }
- svm->nested.ctl.nested_cr3 = 0; - /* * Restore processor state that had been saved in vmcb01 */ @@ -1240,13 +1257,6 @@ void nested_svm_vmexit(struct vcpu_svm *svm)
nested_svm_transition_tlb_flush(vcpu);
- nested_svm_uninit_mmu_context(vcpu); - - if (nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true)) { - kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); - return; - } - /* * Drop what we picked up for L2 via svm_complete_interrupts() so it * doesn't end up in L1. @@ -1255,21 +1265,18 @@ void nested_svm_vmexit(struct vcpu_svm *svm) kvm_clear_exception_queue(vcpu); kvm_clear_interrupt_queue(vcpu);
- /* - * If we are here following the completion of a VMRUN that - * is being single-stepped, queue the pending #DB intercept - * right now so that it an be accounted for before we execute - * L1's next instruction. - */ - if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF)) - kvm_queue_exception(&(svm->vcpu), DB_VECTOR); - /* * Un-inhibit the AVIC right away, so that other vCPUs can start * to benefit from it right away. */ if (kvm_apicv_activated(vcpu->kvm)) __kvm_vcpu_update_apicv(vcpu); + + /* + * Potentially queues an exception, so it needs to be after + * kvm_clear_exception_queue() is called above. + */ + __nested_svm_vmexit(svm); }
static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
There are currently two possible causes of VMRUN failures:
1) Consistency check failures. In this case, KVM updates the exit code in the mapped VMCB12 and exits early in nested_svm_vmrun(). This causes a few problems:
A) KVM does not clear the GIF if the early consistency checks fail (because nested_svm_vmexit() is not called). Nothing requires GIF=0 before a VMRUN; from the APM:
It is assumed that VMM software cleared GIF some time before executing the VMRUN instruction, to ensure an atomic state switch.
So a #VMEXIT synthesized by the early consistency checks could leave the GIF set.
B) svm_leave_smm() is missing consistency checks on the newly loaded guest state, because the checks aren't performed by enter_svm_guest_mode().
2) Failure to load L2's CR3 or to merge the MSR bitmaps. In this case, a fully-fledged #VMEXIT injection is performed, as VMCB02 is already prepared.
Arguably all VMRUN failures should be handled before the VMCB02 is prepared, but with proper cleanup (e.g. clear the GIF). Move all the potential failure checks inside enter_svm_guest_mode() before switching to VMCB02. On failure of any of these checks, nested_svm_vmrun() synthesizes a minimal #VMEXIT through the new nested_svm_failed_vmrun() helper.
__nested_svm_vmexit() already performs the necessary cleanup for a failed VMRUN, including uninitializing the nested MMU and reloading L1's CR3. This ensures that consistency check failures do the necessary cleanup, while other failures do not do too much cleanup. It also leaves a unified path for handling VMRUN failures.
Cc: stable@vger.kernel.org
Fixes: 52c65a30a5c6 ("KVM: SVM: Check for nested vmrun intercept before emulating vmrun")
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 58 +++++++++++++++++++++------------------
 1 file changed, 32 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 1356bd6383ca..632e941febaf 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -889,22 +889,19 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, vmcb12->control.intercepts[INTERCEPT_WORD4], vmcb12->control.intercepts[INTERCEPT_WORD5]);
- svm->nested.vmcb12_gpa = vmcb12_gpa;
WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
enter_guest_mode(vcpu);
+ if (!nested_vmcb_check_save(vcpu, &svm->nested.save) || + !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) + return -EINVAL; + if (nested_npt_enabled(svm)) nested_svm_init_mmu_context(vcpu);
- nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr); - - svm_switch_vmcb(svm, &svm->nested.vmcb02); - nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base); - nested_vmcb02_prepare_save(svm, vmcb12); - ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3, nested_npt_enabled(svm), from_vmrun); if (ret) @@ -916,6 +913,17 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, return ret; }
+ /* + * Any VMRUN failure needs to happen before this point, such that the + * nested #VMEXIT is injected properly by nested_svm_failed_vmrun(). + */ + + nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr); + + svm_switch_vmcb(svm, &svm->nested.vmcb02); + nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base); + nested_vmcb02_prepare_save(svm, vmcb12); + if (!from_vmrun) kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
@@ -957,6 +965,17 @@ static void __nested_svm_vmexit(struct vcpu_svm *svm) kvm_queue_exception(vcpu, DB_VECTOR); }
+static void nested_svm_failed_vmrun(struct vcpu_svm *svm, struct vmcb *vmcb12) +{ + WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr); + + vmcb12->control.exit_code = SVM_EXIT_ERR; + vmcb12->control.exit_code_hi = -1u; + vmcb12->control.exit_info_1 = 0; + vmcb12->control.exit_info_2 = 0; + __nested_svm_vmexit(svm); +} + int nested_svm_vmrun(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -999,15 +1018,6 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
- if (!nested_vmcb_check_save(vcpu, &svm->nested.save) || - !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) { - vmcb12->control.exit_code = SVM_EXIT_ERR; - vmcb12->control.exit_code_hi = -1u; - vmcb12->control.exit_info_1 = 0; - vmcb12->control.exit_info_2 = 0; - goto out; - } - /* * Since vmcb01 is not in use, we can use it to store some of the L1 * state. @@ -1028,15 +1038,9 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) svm->nmi_l1_to_l2 = false; svm->soft_int_injected = false;
- svm->vmcb->control.exit_code = SVM_EXIT_ERR; - svm->vmcb->control.exit_code_hi = -1u; - svm->vmcb->control.exit_info_1 = 0; - svm->vmcb->control.exit_info_2 = 0; - - nested_svm_vmexit(svm); + nested_svm_failed_vmrun(svm, vmcb12); }
-out: kvm_vcpu_unmap(vcpu, &map);
return ret; @@ -1172,6 +1176,8 @@ void nested_svm_vmexit(struct vcpu_svm *svm)
kvm_nested_vmexit_handle_ibrs(vcpu);
+ /* VMRUN failures before switching to VMCB02 are handled by nested_svm_failed_vmrun() */ + WARN_ON_ONCE(svm->vmcb != svm->nested.vmcb02.ptr); svm_switch_vmcb(svm, &svm->vmcb01);
/* @@ -1859,9 +1865,6 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, if (nested_npt_enabled(svm)) nested_svm_init_mmu_context(vcpu);
- svm_switch_vmcb(svm, &svm->nested.vmcb02); - nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base); - /* * While the nested guest CR3 is already checked and set by * KVM_SET_SREGS, it was set when nested state was yet loaded, @@ -1874,6 +1877,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, if (WARN_ON_ONCE(ret)) goto out_free;
+ svm_switch_vmcb(svm, &svm->nested.vmcb02); + nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base); + svm->nested.force_msr_bitmap_recalc = true;
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
According to the APM, from the reference of the VMRUN instruction:
Upon #VMEXIT, the processor performs the following actions in order to return to the host execution context:
    ...
    clear EVENTINJ field in VMCB
KVM correctly cleared EVENTINJ (i.e. event_inj and event_inj_err) on nested #VMEXIT before commit 2d8a42be0e2b ("KVM: nSVM: synchronize VMCB controls updated by the processor on every vmexit"). That commit made sure the fields are synchronized between VMCB02 and KVM's cached VMCB12 on every L2->L0 #VMEXIT, such that they are serialized correctly on save/restore.
However, the commit also incorrectly copied the fields from KVM's cached VMCB12 to L1's VMCB12 on nested #VMEXIT. Go back to clearing the fields, and do so in __nested_svm_vmexit() instead of nested_svm_vmexit(), such that it also applies to #VMEXITs caused by a failed VMRUN.
Fixes: 2d8a42be0e2b ("KVM: nSVM: synchronize VMCB controls updated by the processor on every vmexit")
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 632e941febaf..b4074e674c9d 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -937,7 +937,7 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, return 0; }
-static void __nested_svm_vmexit(struct vcpu_svm *svm) +static void __nested_svm_vmexit(struct vcpu_svm *svm, struct vmcb *vmcb12) { struct vmcb *vmcb01 = svm->vmcb01.ptr; struct kvm_vcpu *vcpu = &svm->vcpu; @@ -949,6 +949,10 @@ static void __nested_svm_vmexit(struct vcpu_svm *svm) svm_set_gif(svm, false); vmcb01->control.exit_int_info = 0;
+ /* event_inj is cleared on #VMEXIT */ + vmcb12->control.event_inj = 0; + vmcb12->control.event_inj_err = 0; + nested_svm_uninit_mmu_context(vcpu); if (nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true)) { kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); @@ -973,7 +977,7 @@ static void nested_svm_failed_vmrun(struct vcpu_svm *svm, struct vmcb *vmcb12) vmcb12->control.exit_code_hi = -1u; vmcb12->control.exit_info_1 = 0; vmcb12->control.exit_info_2 = 0; - __nested_svm_vmexit(svm); + __nested_svm_vmexit(svm, vmcb12); }
int nested_svm_vmrun(struct kvm_vcpu *vcpu) @@ -1156,8 +1160,6 @@ void nested_svm_vmexit(struct vcpu_svm *svm) vmcb12->control.next_rip = vmcb02->control.next_rip;
vmcb12->control.int_ctl = svm->nested.ctl.int_ctl; - vmcb12->control.event_inj = svm->nested.ctl.event_inj; - vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
if (!kvm_pause_in_guest(vcpu->kvm)) { vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count; @@ -1259,8 +1261,6 @@ void nested_svm_vmexit(struct vcpu_svm *svm) vmcb12->control.exit_int_info_err, KVM_ISA_SVM);
- kvm_vcpu_unmap(vcpu, &map); - nested_svm_transition_tlb_flush(vcpu);
/* @@ -1282,7 +1282,9 @@ void nested_svm_vmexit(struct vcpu_svm *svm) * Potentially queues an exception, so it needs to be after * kvm_clear_exception_queue() is called above. */ - __nested_svm_vmexit(svm); + __nested_svm_vmexit(svm, vmcb12); + + kvm_vcpu_unmap(vcpu, &map); }
static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
KVM currently fails a nested VMRUN and injects VMEXIT_INVALID (aka SVM_EXIT_ERR) if L1 sets NP_ENABLE and the host does not support NPT. On first glance, it seems like the check should actually be for guest_cpu_cap_has(X86_FEATURE_NPT) instead, as it is possible for the host to support NPT while the guest CPUID does not advertise it.
However, the consistency check is not architectural to begin with. The APM does not mention VMEXIT_INVALID if NP_ENABLE is set on a processor that does not have X86_FEATURE_NPT. Hence, NP_ENABLE should be ignored if X86_FEATURE_NPT is not available for L1, so sanitize it when copying from the VMCB12 to KVM's cache.
Apart from the consistency check, NP_ENABLE in VMCB12 is currently ignored because the bit is actually copied from VMCB01 to VMCB02, not from VMCB12.
Fixes: 4b16184c1cca ("KVM: SVM: Initialize Nested Nested MMU context on VMRUN")
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index b4074e674c9d..24b10188fb91 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -335,9 +335,6 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, if (CC(control->asid == 0)) return false;
- if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled)) - return false; - if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa, MSRPM_SIZE))) return false; @@ -399,6 +396,11 @@ void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu, for (i = 0; i < MAX_INTERCEPT; i++) to->intercepts[i] = from->intercepts[i];
+ /* Always clear NP_ENABLE if the guest cannot use NPTs */ + to->nested_ctl = from->nested_ctl; + if (!guest_cpu_cap_has(vcpu, X86_FEATURE_NPT)) + to->nested_ctl &= ~SVM_NESTED_CTL_NP_ENABLE; + to->iopm_base_pa = from->iopm_base_pa; to->msrpm_base_pa = from->msrpm_base_pa; to->tsc_offset = from->tsc_offset; @@ -412,7 +414,6 @@ void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu, to->exit_info_2 = from->exit_info_2; to->exit_int_info = from->exit_int_info; to->exit_int_info_err = from->exit_int_info_err; - to->nested_ctl = from->nested_ctl; to->event_inj = from->event_inj; to->event_inj_err = from->event_inj_err; to->next_rip = from->next_rip;
From the APM Volume #2, 15.25.4 (24593—Rev. 3.42—March 2024):
When VMRUN is executed with nested paging enabled (NP_ENABLE = 1), the following conditions are considered illegal state combinations, in addition to those mentioned in "Canonicalization and Consistency Checks":
• Any MBZ bit of nCR3 is set.
• Any G_PAT.PA field has an unsupported type encoding or any reserved field in G_PAT has a nonzero value.
Add the consistency check for nCR3 being a legal GPA with no MBZ bits set. The G_PAT.PA check was proposed separately [*].
[*] https://lore.kernel.org/kvm/20251107201151.3303170-6-jmattson@google.com/
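For intuition only (this is not the KVM helper itself): the "legal GPA" check essentially rejects any nCR3 with bits set at or above the guest's MAXPHYADDR, roughly:

	/*
	 * Conceptual sketch only; KVM's kvm_vcpu_is_legal_gpa() works from
	 * precomputed reserved GPA bits rather than recomputing the mask.
	 */
	static bool ncr3_has_mbz_bits(u64 ncr3, int guest_maxphyaddr)
	{
		return ncr3 & ~((1ULL << guest_maxphyaddr) - 1);
	}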
Fixes: 4b16184c1cca ("KVM: SVM: Initialize Nested Nested MMU context on VMRUN")
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 5 +++++
 1 file changed, 5 insertions(+)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 24b10188fb91..cac61d65efc7 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -335,6 +335,11 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, if (CC(control->asid == 0)) return false;
+ if (nested_npt_enabled(to_svm(vcpu))) { + if (CC(!kvm_vcpu_is_legal_gpa(vcpu, control->nested_cr3))) + return false; + } + if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa, MSRPM_SIZE))) return false;
From the APM Volume #2, 15.25.3 (24593—Rev. 3.42—March 2024):
If VMRUN is executed with hCR0.PG cleared to zero and NP_ENABLE set to 1, VMRUN terminates with #VMEXIT(VMEXIT_INVALID).
Add the consistency check by plumbing L1's CR0 to nested_vmcb_check_controls().
Fixes: 4b16184c1cca ("KVM: SVM: Initialize Nested Nested MMU context on VMRUN")
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index cac61d65efc7..5184e5ae8ccf 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -327,7 +327,8 @@ static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size) }
static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, - struct vmcb_ctrl_area_cached *control) + struct vmcb_ctrl_area_cached *control, + unsigned long l1_cr0) { if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN))) return false; @@ -338,6 +339,8 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, if (nested_npt_enabled(to_svm(vcpu))) { if (CC(!kvm_vcpu_is_legal_gpa(vcpu, control->nested_cr3))) return false; + if (CC(!(l1_cr0 & X86_CR0_PG))) + return false; }
if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa, @@ -902,7 +905,8 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, enter_guest_mode(vcpu);
if (!nested_vmcb_check_save(vcpu, &svm->nested.save) || - !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) + !nested_vmcb_check_controls(vcpu, &svm->nested.ctl, + svm->vmcb01.ptr->save.cr0)) return -EINVAL;
if (nested_npt_enabled(svm)) @@ -1823,7 +1827,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
ret = -EINVAL; __nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl); - if (!nested_vmcb_check_controls(vcpu, &ctl_cached)) + /* 'save' contains L1 state saved from before VMRUN */ + if (!nested_vmcb_check_controls(vcpu, &ctl_cached, save->cr0)) goto out_free;
/*
According to the APM Volume #2, 15.5, Canonicalization and Consistency Checks (24593—Rev. 3.42—March 2024), the following condition (among others) results in a #VMEXIT with VMEXIT_INVALID (aka SVM_EXIT_ERR):
EFER.LME, CR0.PG, CR4.PAE, CS.L, and CS.D are all non-zero.
Add the missing consistency check. This is functionally a nop, because the nested VMRUN would result in SVM_EXIT_ERR in HW anyway and that error is forwarded to L1, but KVM performs all consistency checks before a VMRUN is actually attempted.
Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
Cc: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 7 +++++++
 arch/x86/kvm/svm/svm.h    | 1 +
 2 files changed, 8 insertions(+)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 5184e5ae8ccf..47d4316b126d 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -382,6 +382,11 @@ static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu, CC(!(save->cr0 & X86_CR0_PE)) || CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3))) return false; + + if (CC((save->cr4 & X86_CR4_PAE) && + (save->cs.attrib & SVM_SELECTOR_L_MASK) && + (save->cs.attrib & SVM_SELECTOR_DB_MASK))) + return false; }
/* Note, SVM doesn't have any additional restrictions on CR4. */ @@ -458,6 +463,8 @@ static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to, * Copy only fields that are validated, as we need them * to avoid TOC/TOU races. */ + to->cs = from->cs; + to->efer = from->efer; to->cr0 = from->cr0; to->cr3 = from->cr3; diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 9aa60924623f..d0de7d390889 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -139,6 +139,7 @@ struct kvm_vmcb_info { };
struct vmcb_save_area_cached { + struct vmcb_seg cs; u64 efer; u64 cr4; u64 cr3;
According to the APM Volume #2, 15.20 (24593—Rev. 3.42—March 2024):
VMRUN exits with VMEXIT_INVALID error code if either:
• Reserved values of TYPE have been specified, or
• TYPE = 3 (exception) has been specified with a vector that does not correspond to an exception (this includes vector 2, which is an NMI, not an exception).
Add the missing consistency checks to KVM. For the second point, inject VMEXIT_INVALID if the vector is anything but the vectors defined by the APM for exceptions. Reserved vectors are also considered invalid, which matches the HW behavior. Vector 9 (i.e. #CSO) is considered invalid because it is reserved on modern CPUs, and according to LLMs no CPUs exist supporting SVM and producing #CSOs.
Defined exceptions can differ between virtual CPUs, as new CPUs define new vectors. In a best effort to dynamically define the valid vectors, treat all currently defined vectors as valid except those obviously tied to a CPU feature: SHSTK -> #CP and SEV-ES -> #VC. As new vectors are defined, they can similarly be tied to corresponding CPU features.
Invalid vectors on specific (e.g. old) CPUs that are missed by KVM should be rejected by HW anyway.
Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
CC: stable@vger.kernel.org
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
 arch/x86/kvm/svm/nested.c | 51 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 47d4316b126d..229903e0ad40 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -326,6 +326,54 @@ static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size) kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1); }
+static bool nested_svm_event_inj_valid_exept(struct kvm_vcpu *vcpu, u8 vector) +{ + /* + * Vectors that do not correspond to a defined exception are invalid + * (including #NMI and reserved vectors). In a best effort to define + * valid exceptions based on the virtual CPU, make all exceptions always + * valid except those obviously tied to a CPU feature. + */ + switch (vector) { + case DE_VECTOR: case DB_VECTOR: case BP_VECTOR: case OF_VECTOR: + case BR_VECTOR: case UD_VECTOR: case NM_VECTOR: case DF_VECTOR: + case TS_VECTOR: case NP_VECTOR: case SS_VECTOR: case GP_VECTOR: + case PF_VECTOR: case MF_VECTOR: case AC_VECTOR: case MC_VECTOR: + case XM_VECTOR: case HV_VECTOR: case SX_VECTOR: + return true; + case CP_VECTOR: + return guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK); + case VC_VECTOR: + return guest_cpu_cap_has(vcpu, X86_FEATURE_SEV_ES); + } + return false; +} + +/* + * According to the APM, VMRUN exits with SVM_EXIT_ERR if SVM_EVTINJ_VALID is + * set and: + * - The type of event_inj is not one of the defined values. + * - The type is SVM_EVTINJ_TYPE_EXEPT, but the vector is not a valid exception. + */ +static bool nested_svm_check_event_inj(struct kvm_vcpu *vcpu, u32 event_inj) +{ + u32 type = event_inj & SVM_EVTINJ_TYPE_MASK; + u8 vector = event_inj & SVM_EVTINJ_VEC_MASK; + + if (!(event_inj & SVM_EVTINJ_VALID)) + return true; + + if (type != SVM_EVTINJ_TYPE_INTR && type != SVM_EVTINJ_TYPE_NMI && + type != SVM_EVTINJ_TYPE_EXEPT && type != SVM_EVTINJ_TYPE_SOFT) + return false; + + if (type == SVM_EVTINJ_TYPE_EXEPT && + !nested_svm_event_inj_valid_exept(vcpu, vector)) + return false; + + return true; +} + static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, struct vmcb_ctrl_area_cached *control, unsigned long l1_cr0) @@ -355,6 +403,9 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, return false; }
+ if (CC(!nested_svm_check_event_inj(vcpu, control->event_inj))) + return false; + return true; }
On Mon, Dec 15, 2025 at 07:26:54PM +0000, Yosry Ahmed wrote:
> svm_update_lbrv() always updates the LBR MSR intercepts, even when they
> are already set correctly. This results in force_msr_bitmap_recalc always
> being set to true on every nested transition, essentially undoing the
> Hyper-V optimization in nested_svm_merge_msrpm().
>
> [...]
>
> Fixes: fbe5e5f030c2 ("KVM: nSVM: Always recalculate LBR MSR intercepts in svm_update_lbrv()")
> Cc: stable@vger.kernel.org
> Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Sigh.. I had this patch file in my working directory and it was sent by mistake with the series, as the cover letter nonetheless. Sorry about that. Let me know if I should resend.
> [... diffstat and diff snipped; identical to the patch above ...]
On Mon, Dec 15, 2025, Yosry Ahmed wrote:
> On Mon, Dec 15, 2025 at 07:26:54PM +0000, Yosry Ahmed wrote:
> > [...]
>
> Sigh.. I had this patch file in my working directory and it was sent by
> mistake with the series, as the cover letter nonetheless. Sorry about that.
> Let me know if I should resend.

Eh, it's fine for now. The important part is clarifying that this patch
should be ignored, which you've already done.
On Mon, Dec 15, 2025 at 11:38:00AM -0800, Sean Christopherson wrote:
> On Mon, Dec 15, 2025, Yosry Ahmed wrote:
> > On Mon, Dec 15, 2025 at 07:26:54PM +0000, Yosry Ahmed wrote:
> > > [...]
> >
> > Sigh.. I had this patch file in my working directory and it was sent by
> > mistake with the series, as the cover letter nonetheless. Sorry about
> > that. Let me know if I should resend.
>
> Eh, it's fine for now. The important part is clarifying that this patch
> should be ignored, which you've already done.
FWIW that patch is already in Linus's tree so even if someone applies it, it should be fine.