On 07/11/2022 14:54, Paolo Bonzini wrote:
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 4cfa62e66a0e..ae65cdcab660 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3924,16 +3924,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
	} else {
		struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
/*
* Use a single vmcb (vmcb01 because it's always valid) for
* context switching guest state via VMLOAD/VMSAVE, that way
* the state doesn't need to be copied between vmcb01 and
* vmcb02 when switching vmcbs for nested virtualization.
*/
__svm_vcpu_run(vmcb_pa, svm);
vmload(svm->vmcb01.pa);
vmsave(svm->vmcb01.pa);
- vmload(__sme_page_pa(sd->save_area));
%gs is still the guest's until this vmload has completed.  It needs to move down into asm too.
~Andrew