From: Jean-Baptiste Maneyrol <jean-baptiste.maneyrol(a)tdk.com>
Currently, suspending while sensors are on results in timestamping
continuing without a gap at resume. This works with the monotonic clock
but not with other clocks. Fix that by resetting the timestamping state
on resume.
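For illustration only, here is a minimal userspace sketch of the problem
(hypothetical names, not the inv_sensors_timestamp API): a timestamping state
that keeps extrapolating from its pre-suspend anchor never accounts for the
time spent suspended, which clocks such as CLOCK_REALTIME or CLOCK_BOOTTIME do
include, so the resume path must drop the stale anchor and re-anchor on the
first interrupt after resume.

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical timestamping state, for illustration only */
    struct ts_state {
        int64_t anchor_ns;  /* timestamp of the last processed sample */
        int64_t period_ns;  /* nominal sampling period */
    };

    /* each FIFO sample gets the previous timestamp plus one period */
    static int64_t ts_next(struct ts_state *ts)
    {
        ts->anchor_ns += ts->period_ns;
        return ts->anchor_ns;
    }

    /* forget the stale anchor so the first post-resume IRQ re-anchors it */
    static void ts_reset(struct ts_state *ts, int64_t irq_ns)
    {
        ts->anchor_ns = irq_ns;
    }

    int main(void)
    {
        struct ts_state ts = { .anchor_ns = 1000000000LL, .period_ns = 20000000LL };

        /* without a reset, the first sample after a 5 s suspend continues
         * seamlessly from the old anchor, which is wrong for clocks that
         * advanced during the suspend */
        printf("stale:   %lld\n", (long long)ts_next(&ts));

        /* resetting to the first post-resume interrupt timestamp fixes it */
        ts_reset(&ts, 1000000000LL + 5000000000LL);
        printf("rebased: %lld\n", (long long)ts_next(&ts));
        return 0;
    }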
Fixes: ec74ae9fd37c ("iio: imu: inv_icm42600: add accurate timestamping")
Cc: stable(a)vger.kernel.org
Signed-off-by: Jean-Baptiste Maneyrol <jean-baptiste.maneyrol(a)tdk.com>
---
drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 93b5d7a3339ccff16b21bf6c40ed7b2311317cf4..03139e2e4eddbf37e154de2eb486549bc3bdb284 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -814,6 +814,8 @@ static int inv_icm42600_suspend(struct device *dev)
static int inv_icm42600_resume(struct device *dev)
{
struct inv_icm42600_state *st = dev_get_drvdata(dev);
+ struct inv_icm42600_sensor_state *gyro_st = iio_priv(st->indio_gyro);
+ struct inv_icm42600_sensor_state *accel_st = iio_priv(st->indio_accel);
int ret;
mutex_lock(&st->lock);
@@ -834,9 +836,12 @@ static int inv_icm42600_resume(struct device *dev)
goto out_unlock;
/* restore FIFO data streaming */
- if (st->fifo.on)
+ if (st->fifo.on) {
+ inv_sensors_timestamp_reset(&gyro_st->ts);
+ inv_sensors_timestamp_reset(&accel_st->ts);
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
INV_ICM42600_FIFO_CONFIG_STREAM);
+ }
out_unlock:
mutex_unlock(&st->lock);
---
base-commit: 9dd2270ca0b38ee16094817f4a53e7ba78e31567
change-id: 20241113-inv_icm42600-fix-timestamps-after-suspend-b7e421eaa40c
Best regards,
--
Jean-Baptiste Maneyrol <jean-baptiste.maneyrol(a)tdk.com>
From: Oleg Nesterov <oleg(a)redhat.com>
[ Upstream commit c7b4133c48445dde789ed30b19ccb0448c7593f7 ]
1. Clear utask->xol_vaddr unconditionally, even if this addr is not valid;
   xol_free_insn_slot() should never return with utask->xol_vaddr != NULL.
2. Add a comment to explain why we need to validate slot_addr.
3. Simplify the validation above: checking offset < PAGE_SIZE is enough, since
   unsigned underflow is fine and the check still fails when
   slot_addr < area->vaddr (see the sketch below).
4. Kill the unnecessary "slot_nr >= UINSNS_PER_PAGE" check; slot_nr must be
   valid if offset < PAGE_SIZE.
The next patches will clean up this function even more.
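For illustration of points 3 and 4, a small standalone userspace sketch
(PAGE_SIZE and the slot size below are assumed values, not taken from a kernel
build): when slot_addr lies below area->vaddr, the unsigned subtraction wraps
to a huge value, so the single offset < PAGE_SIZE test rejects both
out-of-range directions, and any offset that passes it maps to a slot_nr below
PAGE_SIZE / UPROBE_XOL_SLOT_BYTES.

    #include <stdio.h>

    #define PAGE_SIZE              4096UL
    #define UPROBE_XOL_SLOT_BYTES  128UL   /* assumed slot size for the sketch */

    int main(void)
    {
        unsigned long vaddr  = 0x7f0000001000UL;    /* start of the [uprobes] vma */
        unsigned long below  = vaddr - 8;           /* slot_addr below the vma */
        unsigned long above  = vaddr + PAGE_SIZE;   /* slot_addr past the vma */
        unsigned long inside = vaddr + 256;         /* a valid slot address */

        /* the underflow wraps: (below - vaddr) is a huge unsigned number */
        printf("below:  in range? %d\n", (below - vaddr) < PAGE_SIZE);   /* 0 */
        printf("above:  in range? %d\n", (above - vaddr) < PAGE_SIZE);   /* 0 */
        printf("inside: in range? %d\n", (inside - vaddr) < PAGE_SIZE);  /* 1 */

        /* any in-range offset yields slot_nr < PAGE_SIZE / UPROBE_XOL_SLOT_BYTES */
        printf("slot_nr = %lu (max %lu)\n",
               (inside - vaddr) / UPROBE_XOL_SLOT_BYTES,
               PAGE_SIZE / UPROBE_XOL_SLOT_BYTES - 1);
        return 0;
    }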
Signed-off-by: Oleg Nesterov <oleg(a)redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz(a)infradead.org>
Link: https://lore.kernel.org/r/20240929144235.GA9471@redhat.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
kernel/events/uprobes.c | 21 +++++++++------------
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index b37a6bde8a915..a0f846b2ac1ea 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1633,8 +1633,8 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
static void xol_free_insn_slot(struct task_struct *tsk)
{
struct xol_area *area;
- unsigned long vma_end;
unsigned long slot_addr;
+ unsigned long offset;
if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
return;
@@ -1643,24 +1643,21 @@ static void xol_free_insn_slot(struct task_struct *tsk)
if (unlikely(!slot_addr))
return;
+ tsk->utask->xol_vaddr = 0;
area = tsk->mm->uprobes_state.xol_area;
- vma_end = area->vaddr + PAGE_SIZE;
- if (area->vaddr <= slot_addr && slot_addr < vma_end) {
- unsigned long offset;
- int slot_nr;
-
- offset = slot_addr - area->vaddr;
- slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
- if (slot_nr >= UINSNS_PER_PAGE)
- return;
+ offset = slot_addr - area->vaddr;
+ /*
+ * slot_addr must fit into [area->vaddr, area->vaddr + PAGE_SIZE).
+ * This check can only fail if the "[uprobes]" vma was mremap'ed.
+ */
+ if (offset < PAGE_SIZE) {
+ int slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
clear_bit(slot_nr, area->bitmap);
atomic_dec(&area->slot_count);
smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
if (waitqueue_active(&area->wq))
wake_up(&area->wq);
-
- tsk->utask->xol_vaddr = 0;
}
}
--
2.43.0
From: Lukas Wunner <lukas(a)wunner.de>
[ Upstream commit 3b0565c703503f832d6cd7ba805aafa3b330cb9d ]
When extracting a signature component r or s from an ASN.1-encoded
integer, ecdsa_get_signature_rs() subtracts the expected length
"bufsize" from the ASN.1 length "vlen" (both of unsigned type size_t)
and stores the result in "diff" (of signed type ssize_t).
This results in a signed integer overflow if vlen > SSIZE_MAX + bufsize.
The kernel is compiled with -fno-strict-overflow, which implies -fwrapv,
meaning signed integer overflow is not undefined behavior. And the
function does check for overflow:
    if (-diff >= bufsize)
        return -EINVAL;
So the code is fine in principle but not very obvious. In the future it
might trigger a false positive with CONFIG_UBSAN_SIGNED_WRAP=y.
Avoid by comparing the two unsigned variables directly and erroring out
if "vlen" is too large.
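As a standalone sketch of the new length rule (userspace C with a made-up
helper, not the kernel function): the unsigned comparison rejects an empty or
oversized vlen up front, so no signed arithmetic is ever involved.

    #include <stdio.h>
    #include <stddef.h>

    /* toy check mirroring the new logic; bufsize stands for ndigits * sizeof(u64) */
    static int rs_len_ok(size_t vlen, size_t bufsize)
    {
        if (!vlen || vlen > bufsize + 1)
            return 0;   /* reject: empty, or too long even with a leading zero */
        return 1;       /* exact size, or one extra leading zero byte */
    }

    int main(void)
    {
        size_t bufsize = 4 * sizeof(unsigned long long); /* e.g. 4 u64 digits for P-256 */

        printf("%d\n", rs_len_ok(bufsize,     bufsize)); /* 1: exact size */
        printf("%d\n", rs_len_ok(bufsize + 1, bufsize)); /* 1: one leading zero allowed */
        printf("%d\n", rs_len_ok(bufsize + 2, bufsize)); /* 0: too many bytes */
        printf("%d\n", rs_len_ok((size_t)-1,  bufsize)); /* 0: huge vlen, no wrap-around math */
        return 0;
    }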
Signed-off-by: Lukas Wunner <lukas(a)wunner.de>
Reviewed-by: Stefan Berger <stefanb(a)linux.ibm.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
Signed-off-by: Herbert Xu <herbert(a)gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
crypto/ecdsa.c | 19 +++++++------------
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
index d5a10959ec281..80ef16ae6a40b 100644
--- a/crypto/ecdsa.c
+++ b/crypto/ecdsa.c
@@ -36,29 +36,24 @@ static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen, unsigned int ndigits)
{
size_t bufsize = ndigits * sizeof(u64);
- ssize_t diff = vlen - bufsize;
const char *d = value;
- if (!value || !vlen)
+ if (!value || !vlen || vlen > bufsize + 1)
return -EINVAL;
- /* diff = 0: 'value' has exacly the right size
- * diff > 0: 'value' has too many bytes; one leading zero is allowed that
- * makes the value a positive integer; error on more
- * diff < 0: 'value' is missing leading zeros
+ /*
+ * vlen may be 1 byte larger than bufsize due to a leading zero byte
+ * (necessary if the most significant bit of the integer is set).
*/
- if (diff > 0) {
+ if (vlen > bufsize) {
/* skip over leading zeros that make 'value' a positive int */
if (*d == 0) {
vlen -= 1;
- diff--;
d++;
- }
- if (diff)
+ } else {
return -EINVAL;
+ }
}
- if (-diff >= bufsize)
- return -EINVAL;
ecc_digits_from_bytes(d, vlen, dest, ndigits);
--
2.43.0
From: Thomas Richter <tmricht(a)linux.ibm.com>
[ Upstream commit a0bd7dacbd51c632b8e2c0500b479af564afadf3 ]
CPU hotplug remove handling triggers the following function
call sequence:
   CPUHP_AP_PERF_S390_SF_ONLINE  --> s390_pmu_sf_offline_cpu()
   ...
   CPUHP_AP_PERF_ONLINE          --> perf_event_exit_cpu()
The s390 CPUMF sampling CPU hotplug handler invokes:
 s390_pmu_sf_offline_cpu()
 +--> cpusf_pmu_setup()
      +--> setup_pmc_cpu()
           +--> deallocate_buffers()
This function de-allocates all sampling data buffers (SDBs) allocated
for that CPU at event initialization. It also clears the
PMU_F_RESERVED bit. The CPU is gone and cannot be sampled.
With the event still active on the removed CPU, the CPU hotplug
support in the kernel performance subsystem triggers the following
function calls on the removed CPU:
 perf_event_exit_cpu()
 +--> perf_event_exit_cpu_context()
      +--> __perf_event_exit_context()
           +--> __perf_remove_from_context()
                +--> event_sched_out()
                     +--> cpumsf_pmu_del()
                          +--> cpumsf_pmu_stop()
                               +--> hw_perf_event_update()
to stop and remove the event. During removal of the event, the
sampling device driver tries to read out the remaining samples from
the sample data buffers (SDBs). But they have already been freed
(and may have been re-assigned). This may lead to a use-after-free
situation, in which case the samples are most likely invalid. In the
best case the memory has not been reassigned and still contains
valid data.
Remedy this situation by checking whether the CPU is still in the
reserved state (bit PMU_F_RESERVED set). If so, the SDBs have not been
released and still contain valid data; this is always the case when the
event is removed without a preceding CPU hotplug off.
If the PMU_F_RESERVED bit is not set, the SDB buffers are gone.
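As a rough sketch of the guard pattern (hypothetical names and plain userspace
C, not the s390 driver code): the offline path frees the buffers and clears
the reserved flag, and the stop path drains samples only while that flag is
still set.

    #include <stdio.h>
    #include <stdlib.h>

    #define PMU_F_RESERVED 0x01

    /* hypothetical per-CPU state, for illustration only */
    struct cpu_hw_sf {
        unsigned int flags;
        void *sdbs;        /* sample data buffers, freed on CPU offline */
    };

    static void read_samples(void *sdbs)
    {
        printf("draining samples from %p\n", sdbs);
    }

    /* hotplug-offline analogue of deallocate_buffers(): the SDBs are gone */
    static void cpu_offline(struct cpu_hw_sf *cpuhw)
    {
        free(cpuhw->sdbs);
        cpuhw->sdbs = NULL;
        cpuhw->flags &= ~PMU_F_RESERVED;
    }

    /* stop-path analogue: only touch the SDBs while still reserved */
    static void event_stop(struct cpu_hw_sf *cpuhw)
    {
        if (cpuhw->flags & PMU_F_RESERVED)
            read_samples(cpuhw->sdbs);
    }

    int main(void)
    {
        struct cpu_hw_sf cpuhw = { .flags = PMU_F_RESERVED, .sdbs = malloc(64) };

        cpu_offline(&cpuhw);  /* the CPU goes away first ... */
        event_stop(&cpuhw);   /* ... then the event is torn down: no access */
        return 0;
    }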
Signed-off-by: Thomas Richter <tmricht(a)linux.ibm.com>
Reviewed-by: Hendrik Brueckner <brueckner(a)linux.ibm.com>
Signed-off-by: Heiko Carstens <hca(a)linux.ibm.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
---
arch/s390/kernel/perf_cpum_sf.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 4f251cd624d7e..6047ccb6f8e26 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1862,7 +1862,9 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags)
event->hw.state |= PERF_HES_STOPPED;
if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
- hw_perf_event_update(event, 1);
+ /* CPU hotplug off removes SDBs. No samples to extract. */
+ if (cpuhw->flags & PMU_F_RESERVED)
+ hw_perf_event_update(event, 1);
event->hw.state |= PERF_HES_UPTODATE;
}
perf_pmu_enable(event->pmu);
--
2.43.0