VMX also uses KVM_REQ_GET_NESTED_STATE_PAGES for the Hyper-V eVMCS, which may need to be loaded outside guest mode. Therefore we cannot WARN in that case.
However, that part of nested_get_vmcs12_pages is _not_ needed at vmentry time. Split it out of KVM_REQ_GET_NESTED_STATE_PAGES handling, so that both vmentry and migration (and in the latter case, independent of is_guest_mode) do the parts that are needed.
Cc: stable@vger.kernel.org # 5.10.x: f2c7ef3ba: KVM: nSVM: cancel KVM_REQ_GET_NESTED_STATE_PAGES Cc: stable@vger.kernel.org # 5.10.x Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> --- arch/x86/kvm/svm/nested.c | 3 +++ arch/x86/kvm/vmx/nested.c | 32 ++++++++++++++++++++++++++------ arch/x86/kvm/x86.c | 4 +--- 3 files changed, 30 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index cb4c6ee10029..7a605ad8254d 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -200,6 +200,9 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu);
+ if (WARN_ON(!is_guest_mode(vcpu))) + return true; + if (!nested_svm_vmrun_msrpm(svm)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 0fbb46990dfc..20ab40a2ac34 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -3124,13 +3124,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) return 0; }
-static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) +static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu) { - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu); - struct kvm_host_map *map; - struct page *page; - u64 hpa;
/* * hv_evmcs may end up being not mapped after migration (when @@ -3152,6 +3148,19 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) return false; } } + return true; +} + +static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct kvm_host_map *map; + struct page *page; + u64 hpa; + + if (!nested_get_evmcs_page(vcpu)) + return false;
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { /* @@ -3224,6 +3233,17 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) return true; }
+static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) +{ + if (!nested_get_evmcs_page(vcpu)) + return false; + + if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu)) + return false; + + return true; +} + static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) { struct vmcs12 *vmcs12; @@ -6602,7 +6622,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = { .hv_timer_pending = nested_vmx_preemption_timer_pending, .get_state = vmx_get_nested_state, .set_state = vmx_set_nested_state, - .get_nested_state_pages = nested_get_vmcs12_pages, + .get_nested_state_pages = vmx_get_nested_state_pages, .write_log_dirty = nested_vmx_write_pml_buffer, .enable_evmcs = nested_enable_evmcs, .get_evmcs_version = nested_get_evmcs_version, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9a8969a6dd06..b910aa74ee05 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8802,9 +8802,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_request_pending(vcpu)) { if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { - if (WARN_ON_ONCE(!is_guest_mode(vcpu))) - ; - else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { + if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { r = 0; goto out; }
On Mon, Jan 25, 2021, Paolo Bonzini wrote:
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) +{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_host_map *map;
+ struct page *page;
+ u64 hpa;
+ if (!nested_get_evmcs_page(vcpu))
+ return false;
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { /* @@ -3224,6 +3233,17 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) return true; } +static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) +{
+ if (!nested_get_evmcs_page(vcpu))
+ return false;
+ if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+ return false;
nested_get_evmcs_page() will get called twice in the common case of is_guest_mode() == true. I can't tell if that will ever be fatal, but it's definitely weird. Maybe this?
if (!is_guest_mode(vcpu)) return nested_get_evmcs_page(vcpu);
return nested_get_vmcs12_pages(vcpu);
+ return true;
+}
static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) { struct vmcs12 *vmcs12; @@ -6602,7 +6622,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = { .hv_timer_pending = nested_vmx_preemption_timer_pending, .get_state = vmx_get_nested_state, .set_state = vmx_set_nested_state,
- .get_nested_state_pages = nested_get_vmcs12_pages,
+ .get_nested_state_pages = vmx_get_nested_state_pages, .write_log_dirty = nested_vmx_write_pml_buffer, .enable_evmcs = nested_enable_evmcs, .get_evmcs_version = nested_get_evmcs_version,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9a8969a6dd06..b910aa74ee05 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8802,9 +8802,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (kvm_request_pending(vcpu)) { if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
- if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
- ;
- else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+ if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { r = 0; goto out; }
-- 2.26.2
On 25/01/21 20:16, Sean Christopherson wrote:
} +static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) +{
+ if (!nested_get_evmcs_page(vcpu))
+ return false;
+ if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+ return false;
nested_get_evmcs_page() will get called twice in the common case of is_guest_mode() == true. I can't tell if that will ever be fatal, but it's definitely weird. Maybe this?
if (!is_guest_mode(vcpu)) return nested_get_evmcs_page(vcpu);
return nested_get_vmcs12_pages(vcpu);
I wouldn't say there is a common case; however the idea was to remove the call to nested_get_evmcs_page from nested_get_vmcs12_pages, since that one is only needed after KVM_GET_NESTED_STATE and not during VMLAUNCH/VMRESUME.
Paolo
On Mon, Jan 25, 2021, Paolo Bonzini wrote:
On 25/01/21 20:16, Sean Christopherson wrote:
} +static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu) +{
+ if (!nested_get_evmcs_page(vcpu))
+ return false;
+ if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+ return false;
nested_get_evmcs_page() will get called twice in the common case of is_guest_mode() == true. I can't tell if that will ever be fatal, but it's definitely weird. Maybe this?
if (!is_guest_mode(vcpu)) return nested_get_evmcs_page(vcpu);
return nested_get_vmcs12_pages(vcpu);
I wouldn't say there is a common case;
Eh, I would argue that it is more common to do KVM_REQ_GET_NESTED_STATE_PAGES with is_guest_mode() than it is with !is_guest_mode(), as the latter is valid if and only if eVMCS is in use. But, I think we're only vying for internet points. :-)
however the idea was to remove the call to nested_get_evmcs_page from nested_get_vmcs12_pages, since that one is only needed after KVM_GET_NESTED_STATE and not during VMLAUNCH/VMRESUME.
I'm confused, this patch explicitly adds a call to nested_get_evmcs_page() in nested_get_vmcs12_pages().
On 25/01/21 20:53, Sean Christopherson wrote:
Eh, I would argue that it is more common to do KVM_REQ_GET_NESTED_STATE_PAGES with is_guest_mode() than it is with !is_guest_mode(), as the latter is valid if and only if eVMCS is in use. But, I think we're only vying for internet points. :-)
however the idea was to remove the call to nested_get_evmcs_page from nested_get_vmcs12_pages, since that one is only needed after KVM_GET_NESTED_STATE and not during VMLAUNCH/VMRESUME.
I'm confused, this patch explicitly adds a call to nested_get_evmcs_page() in nested_get_vmcs12_pages().
What I really meant is that the patch was wrong. :/
I'll send my pull request to Linus without this one, and include it later this week.
Paolo
linux-stable-mirror@lists.linaro.org