From: Haimin Zhang <tcs_kernel@tencent.com>
[ Upstream commit eb7511bf9182292ef1df1082d23039e856d1ddfb ]
Check the return of init_srcu_struct(), which can fail due to OOM, when initializing the page track mechanism. Lack of checking leads to a NULL pointer deref found by a modified syzkaller.
Reported-by: TCS Robot <tcs_robot@tencent.com>
Signed-off-by: Haimin Zhang <tcs_kernel@tencent.com>
Message-Id: <1630636626-12262-1-git-send-email-tcs_kernel@tencent.com>
[Move the call towards the beginning of kvm_arch_init_vm. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/include/asm/kvm_page_track.h | 2 +-
 arch/x86/kvm/page_track.c             | 4 ++--
 arch/x86/kvm/x86.c                    | 7 ++++++-
 3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index 172f9749dbb2..5986bd4aacd6 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
 			    struct kvm_page_track_notifier_node *node);
 };
 
-void kvm_page_track_init(struct kvm *kvm);
+int kvm_page_track_init(struct kvm *kvm);
 void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index 3521e2d176f2..ef89a3ede425 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -167,13 +167,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
 	cleanup_srcu_struct(&head->track_srcu);
 }
 
-void kvm_page_track_init(struct kvm *kvm)
+int kvm_page_track_init(struct kvm *kvm)
 {
 	struct kvm_page_track_notifier_head *head;
 
 	head = &kvm->arch.track_notifier_head;
-	init_srcu_struct(&head->track_srcu);
 	INIT_HLIST_HEAD(&head->track_notifier_list);
+	return init_srcu_struct(&head->track_srcu);
 }
 
 /*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f1a0eebdcf64..69e286edb2c9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9585,9 +9585,15 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
+	int ret;
+
 	if (type)
 		return -EINVAL;
 
+	ret = kvm_page_track_init(kvm);
+	if (ret)
+		return ret;
+
 	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
@@ -9614,7 +9620,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
 	kvm_hv_init_vm(kvm);
-	kvm_page_track_init(kvm);
 	kvm_mmu_init_vm(kvm);
 
 	return kvm_x86_ops->vm_init(kvm);
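
For illustration, here is a minimal userspace sketch (plain C, not kernel code) of the error-handling shape the patch introduces: the fallible SRCU-style initialization is returned from the init helper and checked at the very top of VM setup, before any other state is created, so nothing needs to be unwound on failure. All names (fake_init_srcu, page_track_init, arch_init_vm) are illustrative stand-ins, not the real KVM symbols.

/*
 * Userspace model of the patch's error-propagation pattern.
 * Illustrative only; does not use real KVM or SRCU code.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct track_head { int srcu_ready; };
struct vm { struct track_head track; int other_state; };

/* Stand-in for init_srcu_struct(): may fail with -ENOMEM under OOM. */
static int fake_init_srcu(struct track_head *head)
{
	void *mem = malloc(64);
	if (!mem)
		return -ENOMEM;
	free(mem);
	head->srcu_ready = 1;
	return 0;
}

static int page_track_init(struct vm *vm)
{
	/* Infallible list setup would go first; then return the result
	 * of the fallible SRCU initialization, as in the patch. */
	return fake_init_srcu(&vm->track);
}

static int arch_init_vm(struct vm *vm)
{
	int ret;

	/* Fallible step first: on failure there is nothing to clean up. */
	ret = page_track_init(vm);
	if (ret)
		return ret;

	vm->other_state = 1;	/* remaining, infallible setup */
	return 0;
}

int main(void)
{
	struct vm vm = { 0 };
	printf("arch_init_vm() -> %d\n", arch_init_vm(&vm));
	return 0;
}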
From: Sergey Senozhatsky <senozhatsky@chromium.org>
[ Upstream commit ae232ea460888dc5a8b37e840c553b02521fbf18 ]
grow_halt_poll_ns() ignores values between 0 and halt_poll_ns_grow_start (10000 by default). However, when we shrink halt_poll_ns we may fall way below halt_poll_ns_grow_start and end up with halt_poll_ns values that don't make a lot of sense: like 1 or 9, or 19.
VCPU1 trace (halt_poll_ns_shrink equals 2):
VCPU1 grow 10000
VCPU1 shrink 5000
VCPU1 shrink 2500
VCPU1 shrink 1250
VCPU1 shrink 625
VCPU1 shrink 312
VCPU1 shrink 156
VCPU1 shrink 78
VCPU1 shrink 39
VCPU1 shrink 19
VCPU1 shrink 9
VCPU1 shrink 4
Mirror what grow_halt_poll_ns() does and set halt_poll_ns to 0 as soon as the newly shrunk halt_poll_ns value falls below halt_poll_ns_grow_start.
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20210902031100.252080-1-senozhatsky@chromium.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 virt/kvm/kvm_main.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 77f84cbca740..f31976010622 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2470,15 +2470,19 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 
 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
-	unsigned int old, val, shrink;
+	unsigned int old, val, shrink, grow_start;
 
 	old = val = vcpu->halt_poll_ns;
 	shrink = READ_ONCE(halt_poll_ns_shrink);
+	grow_start = READ_ONCE(halt_poll_ns_grow_start);
 	if (shrink == 0)
 		val = 0;
 	else
 		val /= shrink;
 
+	if (val < grow_start)
+		val = 0;
+
 	vcpu->halt_poll_ns = val;
 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
 }
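
As an aside, the shrink arithmetic is easy to reproduce outside the kernel. The sketch below is a plain C model (not the kernel function) using the defaults from the commit message, grow_start of 10000 and a shrink divisor of 2; it prints the old decay sequence from the trace next to the new behaviour, which drops straight to 0 once the value falls below grow_start.

/*
 * Userspace model of shrink_halt_poll_ns(), before and after the
 * grow_start clamp.  Illustrative only.
 */
#include <stdio.h>

static unsigned int halt_poll_ns_grow_start = 10000;
static unsigned int halt_poll_ns_shrink = 2;

static unsigned int shrink_once(unsigned int val, int clamp_to_grow_start)
{
	if (halt_poll_ns_shrink == 0)
		val = 0;
	else
		val /= halt_poll_ns_shrink;

	/* New behaviour: anything below grow_start is useless, drop to 0. */
	if (clamp_to_grow_start && val < halt_poll_ns_grow_start)
		val = 0;

	return val;
}

int main(void)
{
	unsigned int old_behaviour = 10000, new_behaviour = 10000;

	for (int i = 0; i < 12; i++) {
		old_behaviour = shrink_once(old_behaviour, 0);
		new_behaviour = shrink_once(new_behaviour, 1);
		printf("step %2d: old=%5u new=%5u\n", i + 1,
		       old_behaviour, new_behaviour);
	}
	/* Old behaviour decays 5000, 2500, ..., 4; new drops straight to 0. */
	return 0;
}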
On 06/10/21 13:12, Sasha Levin wrote:
> From: Sergey Senozhatsky <senozhatsky@chromium.org>
>
> [ Upstream commit ae232ea460888dc5a8b37e840c553b02521fbf18 ]
>
> grow_halt_poll_ns() ignores values between 0 and halt_poll_ns_grow_start
> (10000 by default). However, when we shrink halt_poll_ns we may fall way
> below halt_poll_ns_grow_start and end up with halt_poll_ns values that
> don't make a lot of sense: like 1 or 9, or 19.
>
> Mirror what grow_halt_poll_ns() does and set halt_poll_ns to 0 as soon as
> the newly shrunk halt_poll_ns value falls below halt_poll_ns_grow_start.
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
From: Fares Mehanna <faresx@amazon.de>
[ Upstream commit e1fc1553cd78292ab3521c94c9dd6e3e70e606a1 ]
Intel PMU MSRs are in msrs_to_save_all[], so add AMD PMU MSRs to have consistent behavior between Intel and AMD when using KVM_GET_MSRS, KVM_SET_MSRS or KVM_GET_MSR_INDEX_LIST.
We have to add legacy and new MSRs to handle guests running without X86_FEATURE_PERFCTR_CORE.
Signed-off-by: Fares Mehanna <faresx@amazon.de>
Message-Id: <20210915133951.22389-1-faresx@amazon.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/kvm/x86.c | 7 +++++++
 1 file changed, 7 insertions(+)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 69e286edb2c9..4948bf36bd0a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1239,6 +1239,13 @@ static const u32 msrs_to_save_all[] = {
 	MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
 	MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
 	MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
+
+	MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
+	MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
+	MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
+	MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
+	MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
+	MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
 };
 
 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
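
A quick way to see whether these MSRs are now reported is to ask KVM directly. The userspace sketch below issues KVM_GET_MSR_INDEX_LIST on /dev/kvm (the documented two-call pattern: probe with nmsrs = 0, then retry with the reported size) and searches the result for MSR_K7_EVNTSEL0. The 0xc0010000 constant is taken from the usual x86 MSR definitions and should be treated as an assumption; prefer the value from your kernel headers if it differs.

/*
 * Query KVM's MSR index list and look for one of the AMD PMU MSRs
 * added by this patch.  Requires a host with /dev/kvm.
 */
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define MSR_K7_EVNTSEL0 0xc0010000u	/* assumed value, see note above */

int main(void)
{
	int kvm = open("/dev/kvm", O_RDONLY);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* First call with nmsrs == 0 fails with E2BIG and reports the size. */
	struct kvm_msr_list probe = { .nmsrs = 0 };
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);

	struct kvm_msr_list *list =
		calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
	if (!list)
		return 1;
	list->nmsrs = probe.nmsrs;
	if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) < 0) {
		perror("KVM_GET_MSR_INDEX_LIST");
		return 1;
	}

	for (__u32 i = 0; i < list->nmsrs; i++)
		if (list->indices[i] == MSR_K7_EVNTSEL0)
			printf("MSR_K7_EVNTSEL0 is reported by this kernel\n");

	free(list);
	close(kvm);
	return 0;
}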
On 06/10/21 13:12, Sasha Levin wrote:
> From: Fares Mehanna <faresx@amazon.de>
>
> [ Upstream commit e1fc1553cd78292ab3521c94c9dd6e3e70e606a1 ]
>
> Intel PMU MSRs are in msrs_to_save_all[], so add AMD PMU MSRs to have
> consistent behavior between Intel and AMD when using KVM_GET_MSRS,
> KVM_SET_MSRS or KVM_GET_MSR_INDEX_LIST.
>
> We have to add legacy and new MSRs to handle guests running without
> X86_FEATURE_PERFCTR_CORE.
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
From: Thomas Gleixner <tglx@linutronix.de>
[ Upstream commit f2ff7147c6834f244b8ce636b12e71a3bd044629 ]
The hrtimer callback pcsp_do_timer() prepares rearming of the timer with hrtimer_forward(). hrtimer_forward() is intended to provide a mechanism to forward the expiry time of the hrtimer by a multiple of the period argument so that the expiry time is greater than the time provided in the 'now' argument.
pcsp_do_timer() invokes hrtimer_forward() with the current timer expiry time as the 'now' argument. That provides a periodic timer expiry, but it is not robust when the timer callback is delayed: the resulting new expiry time can already be in the past, which causes the callback to be invoked again immediately. If the timer is delayed, then back-to-back invocation is no better than skipping the missed periods. Sound is distorted in either case.
Use hrtimer_forward_now(), which ensures that the next expiry is in the future. This prevents hogging the CPU in the timer expiry code and allows hrtimer_forward() to be removed from the public interfaces later on.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: alsa-devel@alsa-project.org
Cc: Takashi Iwai <tiwai@suse.com>
Cc: Jaroslav Kysela <perex@perex.cz>
Link: https://lore.kernel.org/r/20210923153339.623208460@linutronix.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 sound/drivers/pcsp/pcsp_lib.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index 8f0f05bbc081..23a15d892e50 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -145,7 +145,7 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle)
 	if (pointer_update)
 		pcsp_pointer_update(chip);
 
-	hrtimer_forward(handle, hrtimer_get_expires(handle), ns_to_ktime(ns));
+	hrtimer_forward_now(handle, ns_to_ktime(ns));
 
 	return HRTIMER_RESTART;
 }
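
To make the difference concrete, the plain-integer model below mimics the forwarding contract: advance the expiry by whole periods until it is strictly after the reference time. Forwarding relative to the stale expiry (the old pcsp behaviour) can leave the new expiry behind 'now' when the callback was delayed, while forwarding relative to 'now' always lands in the future. The numbers and helper name are illustrative only, not the real hrtimer API.

/*
 * Userspace model (plain integers, not real hrtimers) of
 * hrtimer_forward() vs. hrtimer_forward_now().
 */
#include <stdio.h>

/* Advance 'expires' by whole periods until it is strictly after 'from'. */
static long forward(long expires, long from, long period)
{
	while (expires <= from)
		expires += period;
	return expires;
}

int main(void)
{
	const long period = 10;		/* arbitrary units */
	long expires = 100;
	long now = 135;			/* callback delayed by 3.5 periods */

	/* Old pcsp behaviour: forward relative to the stale expiry only
	 * reaches 110, still behind 'now', so the callback re-fires at once. */
	long old_style = forward(expires, expires, period);

	/* hrtimer_forward_now() style: forward relative to the current time,
	 * skipping the missed periods entirely (lands at 140 here). */
	long new_style = forward(expires, now, period);

	printf("now=%ld  old-style next expiry=%ld  forward_now expiry=%ld\n",
	       now, old_style, new_style);
	return 0;
}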
On 06/10/21 13:12, Sasha Levin wrote:
> From: Haimin Zhang <tcs_kernel@tencent.com>
>
> [ Upstream commit eb7511bf9182292ef1df1082d23039e856d1ddfb ]
>
> Check the return of init_srcu_struct(), which can fail due to OOM, when
> initializing the page track mechanism. Lack of checking leads to a NULL
> pointer deref found by a modified syzkaller.
Acked-by: Paolo Bonzini <pbonzini@redhat.com>