From: Oliver Upton oupton@google.com
[ Upstream commit 01f91acb55be7aac3950b89c458bcea9ef6e4f49 ]
The SMC64 calling convention passes a function identifier in w0 and its parameters in x1-x17. Given this, there are two deviations in the SMC64 call performed by the steal_time test: the function identifier is assigned to a 64-bit register and the parameter is only 32 bits wide.

Align the call with the SMCCC by using a 32-bit register to handle the function identifier and by increasing the parameter width to 64 bits.
Suggested-by: Andrew Jones drjones@redhat.com
Signed-off-by: Oliver Upton oupton@google.com
Reviewed-by: Andrew Jones drjones@redhat.com
Message-Id: 20210921171121.2148982-3-oupton@google.com
Signed-off-by: Paolo Bonzini pbonzini@redhat.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 tools/testing/selftests/kvm/steal_time.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index fcc840088c91..7daedee3e7ee 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -120,12 +120,12 @@ struct st_time {
         uint64_t st_time;
 };

-static int64_t smccc(uint32_t func, uint32_t arg)
+static int64_t smccc(uint32_t func, uint64_t arg)
 {
         unsigned long ret;

         asm volatile(
-                "mov x0, %1\n"
+                "mov w0, %w1\n"
                 "mov x1, %2\n"
                 "hvc #0\n"
                 "mov %0, x0\n"
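For reference, here is the complete helper as it reads after the change, assembled from the hunks above. The asm constraint list is not visible in the diff, so the one shown here is the form typically used by this selftest and should be read as an assumption rather than a verbatim quote:

static int64_t smccc(uint32_t func, uint64_t arg)
{
        unsigned long ret;

        /* w0 carries the 32-bit function identifier, x1 the 64-bit arg. */
        asm volatile(
                "mov w0, %w1\n"
                "mov x1, %2\n"
                "hvc #0\n"
                "mov %0, x0\n"
        : "=r" (ret) : "r" (func), "r" (arg) :
          "x0", "x1", "x2", "x3");

        return ret;
}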
From: Haimin Zhang tcs_kernel@tencent.com
[ Upstream commit eb7511bf9182292ef1df1082d23039e856d1ddfb ]
Check the return of init_srcu_struct(), which can fail due to OOM, when initializing the page track mechanism. Lack of checking leads to a NULL pointer deref found by a modified syzkaller.
Reported-by: TCS Robot tcs_robot@tencent.com
Signed-off-by: Haimin Zhang tcs_kernel@tencent.com
Message-Id: 1630636626-12262-1-git-send-email-tcs_kernel@tencent.com
[Move the call towards the beginning of kvm_arch_init_vm. - Paolo]
Signed-off-by: Paolo Bonzini pbonzini@redhat.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 arch/x86/include/asm/kvm_page_track.h | 2 +-
 arch/x86/kvm/mmu/page_track.c         | 4 ++--
 arch/x86/kvm/x86.c                    | 7 ++++++-
 3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index 87bd6025d91d..6a5f3acf2b33 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
                             struct kvm_page_track_notifier_node *node);
 };

-void kvm_page_track_init(struct kvm *kvm);
+int kvm_page_track_init(struct kvm *kvm);
 void kvm_page_track_cleanup(struct kvm *kvm);

 void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 8443a675715b..81cf4babbd0b 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -163,13 +163,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
         cleanup_srcu_struct(&head->track_srcu);
 }

-void kvm_page_track_init(struct kvm *kvm)
+int kvm_page_track_init(struct kvm *kvm)
 {
         struct kvm_page_track_notifier_head *head;

         head = &kvm->arch.track_notifier_head;
-        init_srcu_struct(&head->track_srcu);
         INIT_HLIST_HEAD(&head->track_notifier_list);
+        return init_srcu_struct(&head->track_srcu);
 }

 /*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 75c59ad27e9f..d65da3b5837b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10392,9 +10392,15 @@ void kvm_arch_free_vm(struct kvm *kvm)

 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
+        int ret;
+
         if (type)
                 return -EINVAL;

+        ret = kvm_page_track_init(kvm);
+        if (ret)
+                return ret;
+
         INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
         INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
@@ -10421,7 +10427,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
         INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);

         kvm_hv_init_vm(kvm);
-        kvm_page_track_init(kvm);
         kvm_mmu_init_vm(kvm);

         return kvm_x86_ops.vm_init(kvm);
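Assembled from the three hunks, the resulting init path looks roughly like this (a sketch for readability, not a verbatim copy of the tree):

int kvm_page_track_init(struct kvm *kvm)
{
        struct kvm_page_track_notifier_head *head;

        head = &kvm->arch.track_notifier_head;
        INIT_HLIST_HEAD(&head->track_notifier_list);
        /* init_srcu_struct() allocates memory and can fail under OOM. */
        return init_srcu_struct(&head->track_srcu);
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int ret;

        if (type)
                return -EINVAL;

        /* Fail VM creation instead of leaving track_srcu unusable. */
        ret = kvm_page_track_init(kvm);
        if (ret)
                return ret;

        /* ... the rest of the initialization, unchanged ... */
}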
On 06/10/21 13:12, Sasha Levin wrote:
> From: Haimin Zhang tcs_kernel@tencent.com
>
> [ Upstream commit eb7511bf9182292ef1df1082d23039e856d1ddfb ]
> [...]
Acked-by: Paolo Bonzini pbonzini@redhat.com
From: Sergey Senozhatsky senozhatsky@chromium.org
[ Upstream commit ae232ea460888dc5a8b37e840c553b02521fbf18 ]
grow_halt_poll_ns() ignores values between 0 and halt_poll_ns_grow_start (10000 by default). However, when we shrink halt_poll_ns we may fall way below halt_poll_ns_grow_start and end up with halt_poll_ns values that make little sense, such as 1, 9, or 19.
VCPU1 trace (halt_poll_ns_shrink equals 2):
VCPU1 grow 10000
VCPU1 shrink 5000
VCPU1 shrink 2500
VCPU1 shrink 1250
VCPU1 shrink 625
VCPU1 shrink 312
VCPU1 shrink 156
VCPU1 shrink 78
VCPU1 shrink 39
VCPU1 shrink 19
VCPU1 shrink 9
VCPU1 shrink 4
Mirror what grow_halt_poll_ns() does and set halt_poll_ns to 0 as soon as the shrunk halt_poll_ns value falls below halt_poll_ns_grow_start.
Signed-off-by: Sergey Senozhatsky senozhatsky@chromium.org
Signed-off-by: Paolo Bonzini pbonzini@redhat.com
Message-Id: 20210902031100.252080-1-senozhatsky@chromium.org
Signed-off-by: Paolo Bonzini pbonzini@redhat.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 virt/kvm/kvm_main.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0e4310c415a8..57c0c3b18bde 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2756,15 +2756,19 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)

 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
-        unsigned int old, val, shrink;
+        unsigned int old, val, shrink, grow_start;

         old = val = vcpu->halt_poll_ns;
         shrink = READ_ONCE(halt_poll_ns_shrink);
+        grow_start = READ_ONCE(halt_poll_ns_grow_start);
         if (shrink == 0)
                 val = 0;
         else
                 val /= shrink;

+        if (val < grow_start)
+                val = 0;
+
         vcpu->halt_poll_ns = val;
         trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
 }
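The effect of the clamp is easy to check in isolation. A minimal user-space model of the fixed logic, with the module parameters passed as plain arguments instead of being read with READ_ONCE():

/* Model of the fixed shrink logic: anything below grow_start becomes 0. */
static unsigned int shrink_poll_ns(unsigned int val, unsigned int shrink,
                                   unsigned int grow_start)
{
        if (shrink == 0)
                return 0;
        val /= shrink;
        /* e.g. 10000 / 2 = 5000 < 10000, so the poll window drops to 0 */
        if (val < grow_start)
                return 0;
        return val;
}

With shrink = 2 as in the trace above, the eleven pointless halvings collapse into a single step from 10000 to 0.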
On 06/10/21 13:12, Sasha Levin wrote:
> From: Sergey Senozhatsky senozhatsky@chromium.org
>
> [ Upstream commit ae232ea460888dc5a8b37e840c553b02521fbf18 ]
> [...]
Acked-by: Paolo Bonzini pbonzini@redhat.com
From: Maxim Levitsky mlevitsk@redhat.com
[ Upstream commit c42dec148b3e1a88835e275b675e5155f99abd43 ]
Since no actual VM entry happened, the VM exit information is stale. To avoid this, synthesize an invalid VM guest state VM exit.
Suggested-by: Sean Christopherson seanjc@google.com
Signed-off-by: Maxim Levitsky mlevitsk@redhat.com
Message-Id: 20210913140954.165665-6-mlevitsk@redhat.com
Signed-off-by: Paolo Bonzini pbonzini@redhat.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 arch/x86/kvm/vmx/vmx.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index fcd8bcb7e0ea..e3af56f05a37 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6670,10 +6670,21 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                      vmx->loaded_vmcs->soft_vnmi_blocked))
                 vmx->loaded_vmcs->entry_time = ktime_get();

-        /* Don't enter VMX if guest state is invalid, let the exit handler
-           start emulation until we arrive back to a valid state */
-        if (vmx->emulation_required)
+        /*
+         * Don't enter VMX if guest state is invalid, let the exit handler
+         * start emulation until we arrive back to a valid state. Synthesize a
+         * consistency check VM-Exit due to invalid guest state and bail.
+         */
+        if (unlikely(vmx->emulation_required)) {
+                vmx->fail = 0;
+                vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
+                vmx->exit_reason.failed_vmentry = 1;
+                kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
+                vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
+                kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
+                vmx->exit_intr_info = 0;
                 return EXIT_FASTPATH_NONE;
+        }

         if (vmx->ple_window_dirty) {
                 vmx->ple_window_dirty = false;
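For readers without the SDM at hand: bit 31 of the VMX exit reason is the VM-entry-failure flag, which is why the synthesized exit sets both the basic reason and failed_vmentry. A simplified sketch of the layout — the real union in arch/x86/kvm/vmx/vmx.h carries more fields than shown here:

union vmx_exit_reason_sketch {
        struct {
                u32 basic          : 16; /* EXIT_REASON_INVALID_STATE here */
                u32 reserved       : 15;
                u32 failed_vmentry : 1;  /* bit 31: VM-entry failure */
        };
        u32 full;
};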
On 06/10/21 13:12, Sasha Levin wrote:
> From: Maxim Levitsky mlevitsk@redhat.com
>
> [ Upstream commit c42dec148b3e1a88835e275b675e5155f99abd43 ]
> [...]
NACK for this one.
The others are good, I'll ack them individually though. Thanks for setting this up!
Paolo
From: Fares Mehanna faresx@amazon.de
[ Upstream commit e1fc1553cd78292ab3521c94c9dd6e3e70e606a1 ]
Intel PMU MSRs are in msrs_to_save_all[], so add AMD PMU MSRs to have consistent behavior between Intel and AMD when using KVM_GET_MSRS, KVM_SET_MSRS or KVM_GET_MSR_INDEX_LIST.
We have to add legacy and new MSRs to handle guests running without X86_FEATURE_PERFCTR_CORE.
Signed-off-by: Fares Mehanna faresx@amazon.de
Message-Id: 20210915133951.22389-1-faresx@amazon.de
Signed-off-by: Paolo Bonzini pbonzini@redhat.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 arch/x86/kvm/x86.c | 7 +++++++
 1 file changed, 7 insertions(+)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d65da3b5837b..b885063dc393 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1250,6 +1250,13 @@ static const u32 msrs_to_save_all[] = {
         MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
         MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
         MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
+
+        MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
+        MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
+        MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
+        MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
+        MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
+        MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
 };

 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
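One way to observe the effect from user space is to check that the AMD indices now appear in KVM_GET_MSR_INDEX_LIST. A minimal probe — a sketch with error handling omitted; the MSR_K7_EVNTSEL0 value is copied from arch/x86/include/asm/msr-index.h:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_K7_EVNTSEL0 0xc0010000      /* from asm/msr-index.h */

int main(void)
{
        struct kvm_msr_list probe = { .nmsrs = 0 };
        struct kvm_msr_list *list;
        int kvm = open("/dev/kvm", O_RDWR);

        /* First call fails with E2BIG but fills in the required count. */
        ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);
        list = calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
        list->nmsrs = probe.nmsrs;
        ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list);

        for (unsigned int i = 0; i < list->nmsrs; i++)
                if (list->indices[i] == MSR_K7_EVNTSEL0)
                        printf("MSR_K7_EVNTSEL0 is reported\n");
        return 0;
}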
On 06/10/21 13:12, Sasha Levin wrote:
> From: Fares Mehanna faresx@amazon.de
>
> [ Upstream commit e1fc1553cd78292ab3521c94c9dd6e3e70e606a1 ]
> [...]
Acked-by: Paolo Bonzini pbonzini@redhat.com
From: Maxim Levitsky mlevitsk@redhat.com
[ Upstream commit aee77e1169c1900fe4248dc186962e745b479d9e ]
In svm_clear_vintr we try to restore the virtual interrupt injection that might be pending, but we fail to restore the interrupt vector.
Signed-off-by: Maxim Levitsky mlevitsk@redhat.com
Message-Id: 20210914154825.104886-2-mlevitsk@redhat.com
Signed-off-by: Paolo Bonzini pbonzini@redhat.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 arch/x86/kvm/svm/svm.c | 2 ++
 1 file changed, 2 insertions(+)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1c23aee3778c..5e1d7396a6b8 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1497,6 +1497,8 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
                         (svm->nested.ctl.int_ctl & V_TPR_MASK));
                 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
                         V_IRQ_INJECTION_BITS_MASK;
+
+                svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
         }

         vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
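For context: in the VMCB, V_IRQ and its priority live in int_ctl while the vector to inject is a separate field, which is why restoring int_ctl alone was not enough. A trimmed-down sketch (field names follow arch/x86/include/asm/svm.h; the real struct has many more members):

struct vmcb_control_area_sketch {
        u32 int_ctl;    /* V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING */
        u32 int_vector; /* vector of the pending virtual interrupt */
};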
On 06/10/21 13:12, Sasha Levin wrote:
> From: Maxim Levitsky mlevitsk@redhat.com
>
> [ Upstream commit aee77e1169c1900fe4248dc186962e745b479d9e ]
> [...]
Acked-by: Paolo Bonzini pbonzini@redhat.com
From: Thomas Gleixner tglx@linutronix.de
[ Upstream commit f2ff7147c6834f244b8ce636b12e71a3bd044629 ]
The hrtimer callback pcsp_do_timer() prepares rearming of the timer with hrtimer_forward(). hrtimer_forward() is intended to provide a mechanism to forward the expiry time of the hrtimer by a multiple of the period argument, so that the expiry time is greater than the time provided in the 'now' argument.

pcsp_do_timer() invokes hrtimer_forward() with the current timer expiry time as the 'now' argument. That provides a periodic timer expiry, but it is not robust when the timer callback is delayed: the resulting new expiry time can already be in the past, which causes the callback to be invoked again immediately. If the timer is delayed, back-to-back invocations are no better than skipping the missed periods; sound is distorted in either case.

Use hrtimer_forward_now(), which ensures that the next expiry is in the future. This prevents hogging the CPU in the timer expiry code and later on allows hrtimer_forward() to be removed from the public interfaces.
Signed-off-by: Thomas Gleixner tglx@linutronix.de
Cc: alsa-devel@alsa-project.org
Cc: Takashi Iwai tiwai@suse.com
Cc: Jaroslav Kysela perex@perex.cz
Link: https://lore.kernel.org/r/20210923153339.623208460@linutronix.de
Signed-off-by: Takashi Iwai tiwai@suse.de
Signed-off-by: Sasha Levin sashal@kernel.org
---
 sound/drivers/pcsp/pcsp_lib.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index ed40d0f7432c..773db4bf0876 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -143,7 +143,7 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle)
         if (pointer_update)
                 pcsp_pointer_update(chip);

-        hrtimer_forward(handle, hrtimer_get_expires(handle), ns_to_ktime(ns));
+        hrtimer_forward_now(handle, ns_to_ktime(ns));

         return HRTIMER_RESTART;
 }
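The rearming pattern the series converges on looks like this — a generic sketch with a made-up 10 ms period, not the pcsp code itself:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart my_periodic_cb(struct hrtimer *t)
{
        /* ... do the periodic work ... */

        /*
         * hrtimer_forward_now() advances the expiry by as many periods
         * as needed to land in the future relative to the current time.
         * Forwarding from the stale expiry, as the old code did, can
         * produce an expiry already in the past and an immediate
         * re-invocation of the callback.
         */
        hrtimer_forward_now(t, ms_to_ktime(10));
        return HRTIMER_RESTART;
}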
On 06/10/21 13:12, Sasha Levin wrote:
> From: Oliver Upton oupton@google.com
>
> [ Upstream commit 01f91acb55be7aac3950b89c458bcea9ef6e4f49 ]
> [...]
Just fixing tests, but anyway:
Acked-by: Paolo Bonzini pbonzini@redhat.com
On Wed, Oct 06, 2021 at 01:22:31PM +0200, Paolo Bonzini wrote:
> On 06/10/21 13:12, Sasha Levin wrote:
> > From: Oliver Upton oupton@google.com
> >
> > [ Upstream commit 01f91acb55be7aac3950b89c458bcea9ef6e4f49 ]
> > [...]
>
> Just fixing tests, but anyway:
>
> Acked-by: Paolo Bonzini pbonzini@redhat.com
I'll queue up all the ones you acked, thanks!