Enable the secondary VM exit controls to prepare for nested FRED.

Advertise VM_EXIT_ACTIVATE_SECONDARY_CONTROLS to L1, emulate reads of the new capability MSR MSR_IA32_VMX_EXIT_CTLS2, add the secondary_vm_exit_controls field to vmcs12, and, when the activate-secondary-controls bit is set, propagate vmcs01's secondary VM exit controls into vmcs02.
Signed-off-by: Xin Li <xin3.li@intel.com>
Tested-by: Shan Kang <shan.kang@intel.com>
---
 Documentation/virt/kvm/x86/nested-vmx.rst |  1 +
 arch/x86/kvm/vmx/capabilities.h           |  1 +
 arch/x86/kvm/vmx/nested.c                 | 15 ++++++++++++++-
 arch/x86/kvm/vmx/vmcs12.c                 |  1 +
 arch/x86/kvm/vmx/vmcs12.h                 |  2 ++
 arch/x86/kvm/x86.h                        |  2 +-
 6 files changed, 20 insertions(+), 2 deletions(-)
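[Note, not part of the commit: a minimal sketch of what this exposes to L1. Once VM_EXIT_ACTIVATE_SECONDARY_CONTROLS shows up as an allowed-1 bit in the IA32_VMX_EXIT_CTLS capability MSR, an L1 VMM can read IA32_VMX_EXIT_CTLS2 to learn which secondary VM exit controls it may set. probe_secondary_exit_ctls() and rdmsr64() below are hypothetical helpers; the MSR indices and the bit position are my reading of the SDM/FRED spec, not something defined by this patch.]

/*
 * Illustrative only, not part of this patch.  rdmsr64() is a
 * hypothetical wrapper around the RDMSR instruction.
 */
#include <stdint.h>

#define MSR_IA32_VMX_EXIT_CTLS			0x483
#define MSR_IA32_VMX_EXIT_CTLS2			0x493	/* per the FRED spec */
#define VM_EXIT_ACTIVATE_SECONDARY_CONTROLS	(1ULL << 31)

extern uint64_t rdmsr64(uint32_t msr);

/* Return the secondary VM exit controls L1 is allowed to set, or 0. */
static uint64_t probe_secondary_exit_ctls(void)
{
	uint64_t exit_ctls = rdmsr64(MSR_IA32_VMX_EXIT_CTLS);

	/* Allowed-1 settings of the primary exit controls are in bits 63:32. */
	if (!((exit_ctls >> 32) & VM_EXIT_ACTIVATE_SECONDARY_CONTROLS))
		return 0;

	/* Every bit set here is a secondary exit control that may be enabled. */
	return rdmsr64(MSR_IA32_VMX_EXIT_CTLS2);
}

KVM mirrors this on the host side in nested_vmx_setup_exit_ctls() below: MSR_IA32_VMX_EXIT_CTLS2 is only read when the activate-secondary-controls bit is advertised.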
diff --git a/Documentation/virt/kvm/x86/nested-vmx.rst b/Documentation/virt/kvm/x86/nested-vmx.rst
index ac2095d41f02..e64ef231f310 100644
--- a/Documentation/virt/kvm/x86/nested-vmx.rst
+++ b/Documentation/virt/kvm/x86/nested-vmx.rst
@@ -217,6 +217,7 @@ struct shadow_vmcs is ever changed.
 	u16 host_fs_selector;
 	u16 host_gs_selector;
 	u16 host_tr_selector;
+	u64 secondary_vm_exit_controls;
 };
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 73bf6618c425..b41c2cde811d 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -38,6 +38,7 @@ struct nested_vmx_msrs {
 	u32 pinbased_ctls_high;
 	u32 exit_ctls_low;
 	u32 exit_ctls_high;
+	u64 secondary_exit_ctls;
 	u32 entry_ctls_low;
 	u32 entry_ctls_high;
 	u32 misc_low;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8a5fda04e2de..1132e360ff13 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1431,6 +1431,7 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	case MSR_IA32_VMX_PINBASED_CTLS:
 	case MSR_IA32_VMX_PROCBASED_CTLS:
 	case MSR_IA32_VMX_EXIT_CTLS:
+	case MSR_IA32_VMX_EXIT_CTLS2:
 	case MSR_IA32_VMX_ENTRY_CTLS:
 		/*
 		 * The "non-true" VMX capability MSRs are generated from the
@@ -1509,6 +1510,9 @@ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
 		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
 			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
 		break;
+	case MSR_IA32_VMX_EXIT_CTLS2:
+		*pdata = msrs->secondary_exit_ctls;
+		break;
 	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
 	case MSR_IA32_VMX_ENTRY_CTLS:
 		*pdata = vmx_control_msr(
@@ -2443,6 +2447,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
 		exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
 	vm_exit_controls_set(vmx, exec_control);
 
+	if (exec_control & VM_EXIT_ACTIVATE_SECONDARY_CONTROLS) {
+		exec_control = __secondary_vm_exit_controls_get(vmcs01);
+		secondary_vm_exit_controls_set(vmx, exec_control);
+	}
+
 	/*
 	 * Interrupt/Exception Fields
 	 */
@@ -6856,13 +6865,17 @@ static void nested_vmx_setup_exit_ctls(struct vmcs_config *vmcs_conf,
 		VM_EXIT_HOST_ADDR_SPACE_SIZE |
 #endif
 		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
-		VM_EXIT_CLEAR_BNDCFGS;
+		VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_ACTIVATE_SECONDARY_CONTROLS;
 	msrs->exit_ctls_high |=
 		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
 		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
 		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT |
 		VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
 
+	/* secondary exit controls */
+	if (msrs->exit_ctls_high & VM_EXIT_ACTIVATE_SECONDARY_CONTROLS)
+		rdmsrl(MSR_IA32_VMX_EXIT_CTLS2, msrs->secondary_exit_ctls);
+
 	/* We support free control of debug control saving. */
 	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
 }
diff --git a/arch/x86/kvm/vmx/vmcs12.c b/arch/x86/kvm/vmx/vmcs12.c
index 106a72c923ca..98457d7b2b23 100644
--- a/arch/x86/kvm/vmx/vmcs12.c
+++ b/arch/x86/kvm/vmx/vmcs12.c
@@ -73,6 +73,7 @@ const unsigned short vmcs12_field_offsets[] = {
 	FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
 	FIELD(CR3_TARGET_COUNT, cr3_target_count),
 	FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
+	FIELD(SECONDARY_VM_EXIT_CONTROLS, secondary_vm_exit_controls),
 	FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
 	FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
 	FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h
index 01936013428b..f50f897b9b5f 100644
--- a/arch/x86/kvm/vmx/vmcs12.h
+++ b/arch/x86/kvm/vmx/vmcs12.h
@@ -185,6 +185,7 @@ struct __packed vmcs12 {
 	u16 host_gs_selector;
 	u16 host_tr_selector;
 	u16 guest_pml_index;
+	u64 secondary_vm_exit_controls;
 };
 
 /*
@@ -358,6 +359,7 @@ static inline void vmx_check_vmcs12_offsets(void)
 	CHECK_OFFSET(host_gs_selector, 992);
 	CHECK_OFFSET(host_tr_selector, 994);
 	CHECK_OFFSET(guest_pml_index, 996);
+	CHECK_OFFSET(secondary_vm_exit_controls, 998);
 }
 
 extern const unsigned short vmcs12_field_offsets[];
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index c1f1d5696080..498bb6090b1e 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -47,7 +47,7 @@ void kvm_spurious_fault(void);
  * associated feature that KVM supports for nested virtualization.
  */
 #define KVM_FIRST_EMULATED_VMX_MSR	MSR_IA32_VMX_BASIC
-#define KVM_LAST_EMULATED_VMX_MSR	MSR_IA32_VMX_VMFUNC
+#define KVM_LAST_EMULATED_VMX_MSR	MSR_IA32_VMX_EXIT_CTLS2
 
 #define KVM_DEFAULT_PLE_GAP		128
 #define KVM_VMX_DEFAULT_PLE_WINDOW	4096