The patch below does not apply to the 6.12-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
To reproduce the conflict and resubmit, you may use the following commands:
git fetch https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ linux-6.12.y
git checkout FETCH_HEAD
git cherry-pick -x bcd1383516bb5a6f72b2d1e7f7ad42c4a14837d1
# <resolve conflicts, build, test, etc.>
git commit -s
git send-email --to '<stable(a)vger.kernel.org>' --in-reply-to '2025101352-joylessly-yoyo-b1e8@gregkh' --subject-prefix 'PATCH 6.12.y' HEAD^..
Possible dependencies:
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From bcd1383516bb5a6f72b2d1e7f7ad42c4a14837d1 Mon Sep 17 00:00:00 2001
From: Kai Vehmanen <kai.vehmanen(a)linux.intel.com>
Date: Thu, 2 Oct 2025 10:47:15 +0300
Subject: [PATCH] ASoC: SOF: ipc4-pcm: fix delay calculation when DSP resamples
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
When the sampling rates going in (host) and out (dai) from the DSP
are different, the IPC4 delay reporting does not work correctly.
Add support for this case by scaling all the raw position values to
a common timebase before calculating the real-time delay for the PCM.
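For illustration, a minimal standalone sketch of the rescaling this change
performs (a userspace re-expression, not the kernel helper
sof_ipc4_frames_dai_to_host() added below; the function and variable names
in the sketch are made up for the example):

#include <stdint.h>
#include <stdio.h>

/* Convert a frame counter taken at the DAI rate to frames at the host rate. */
static uint64_t frames_dai_to_host(uint64_t frames, uint64_t dai_rate,
				   uint64_t host_rate)
{
	if (!dai_rate || !host_rate || dai_rate == host_rate)
		return frames;

	/*
	 * Rates can be up to 768000, so multiplying a counter larger than
	 * U32_MAX first could overflow 64 bits; divide first in that case,
	 * otherwise multiply first to keep precision.
	 */
	if (frames > UINT32_MAX)
		return (frames / dai_rate) * host_rate;

	return (frames * host_rate) / dai_rate;
}

int main(void)
{
	/* e.g. 96000 frames at a 48 kHz DAI rate are 88200 frames at a 44.1 kHz host rate */
	printf("%llu\n",
	       (unsigned long long)frames_dai_to_host(96000, 48000, 44100));
	return 0;
}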
Cc: stable(a)vger.kernel.org
Fixes: 0ea06680dfcb ("ASoC: SOF: ipc4-pcm: Correct the delay calculation")
Signed-off-by: Kai Vehmanen <kai.vehmanen(a)linux.intel.com>
Reviewed-by: Péter Ujfalusi <peter.ujfalusi(a)linux.intel.com>
Reviewed-by: Bard Liao <yung-chuan.liao(a)linux.intel.com>
Signed-off-by: Peter Ujfalusi <peter.ujfalusi(a)linux.intel.com>
Link: https://patch.msgid.link/20251002074719.2084-2-peter.ujfalusi@linux.intel.c…
Signed-off-by: Mark Brown <broonie(a)kernel.org>
diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
index cb9a06792a47..769ba4fed56a 100644
--- a/sound/soc/sof/ipc4-pcm.c
+++ b/sound/soc/sof/ipc4-pcm.c
@@ -19,12 +19,14 @@
* struct sof_ipc4_timestamp_info - IPC4 timestamp info
* @host_copier: the host copier of the pcm stream
* @dai_copier: the dai copier of the pcm stream
- * @stream_start_offset: reported by fw in memory window (converted to frames)
- * @stream_end_offset: reported by fw in memory window (converted to frames)
+ * @stream_start_offset: reported by fw in memory window (converted to
+ * frames at host_copier sampling rate)
+ * @stream_end_offset: reported by fw in memory window (converted to
+ * frames at host_copier sampling rate)
* @llp_offset: llp offset in memory window
- * @boundary: wrap boundary should be used for the LLP frame counter
* @delay: Calculated and stored in pointer callback. The stored value is
- * returned in the delay callback.
+ * returned in the delay callback. Expressed in frames at host copier
+ * sampling rate.
*/
struct sof_ipc4_timestamp_info {
struct sof_ipc4_copier *host_copier;
@@ -33,7 +35,6 @@ struct sof_ipc4_timestamp_info {
u64 stream_end_offset;
u32 llp_offset;
- u64 boundary;
snd_pcm_sframes_t delay;
};
@@ -48,6 +49,16 @@ struct sof_ipc4_pcm_stream_priv {
bool chain_dma_allocated;
};
+/*
+ * Modulus to use to compare host and link position counters. The sampling
+ * rates may be different, so the raw hardware counters will wrap
+ * around at different times. To calculate differences, use
+ * DELAY_BOUNDARY as a common modulus. This value must be smaller than
+ * the wrap-around point of any hardware counter, and larger than any
+ * valid delay measurement.
+ */
+#define DELAY_BOUNDARY U32_MAX
+
static inline struct sof_ipc4_timestamp_info *
sof_ipc4_sps_to_time_info(struct snd_sof_pcm_stream *sps)
{
@@ -1049,6 +1060,35 @@ static int sof_ipc4_pcm_hw_params(struct snd_soc_component *component,
return 0;
}
+static u64 sof_ipc4_frames_dai_to_host(struct sof_ipc4_timestamp_info *time_info, u64 value)
+{
+ u64 dai_rate, host_rate;
+
+ if (!time_info->dai_copier || !time_info->host_copier)
+ return value;
+
+ /*
+ * copiers do not change sampling rate, so we can use the
+ * out_format independently of stream direction
+ */
+ dai_rate = time_info->dai_copier->data.out_format.sampling_frequency;
+ host_rate = time_info->host_copier->data.out_format.sampling_frequency;
+
+ if (!dai_rate || !host_rate || dai_rate == host_rate)
+ return value;
+
+ /* take care not to overflow u64, rates can be up to 768000 */
+ if (value > U32_MAX) {
+ value = div64_u64(value, dai_rate);
+ value *= host_rate;
+ } else {
+ value *= host_rate;
+ value = div64_u64(value, dai_rate);
+ }
+
+ return value;
+}
+
static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream,
struct snd_sof_pcm_stream *sps,
@@ -1099,14 +1139,13 @@ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
time_info->stream_end_offset = ppl_reg.stream_end_offset;
do_div(time_info->stream_end_offset, dai_sample_size);
+ /* convert to host frame time */
+ time_info->stream_start_offset =
+ sof_ipc4_frames_dai_to_host(time_info, time_info->stream_start_offset);
+ time_info->stream_end_offset =
+ sof_ipc4_frames_dai_to_host(time_info, time_info->stream_end_offset);
+
out:
- /*
- * Calculate the wrap boundary need to be used for delay calculation
- * The host counter is in bytes, it will wrap earlier than the frames
- * based link counter.
- */
- time_info->boundary = div64_u64(~((u64)0),
- frames_to_bytes(substream->runtime, 1));
/* Initialize the delay value to 0 (no delay) */
time_info->delay = 0;
@@ -1149,6 +1188,8 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
/* For delay calculation we need the host counter */
host_cnt = snd_sof_pcm_get_host_byte_counter(sdev, component, substream);
+
+ /* Store the original value to host_ptr */
host_ptr = host_cnt;
/* convert the host_cnt to frames */
@@ -1167,6 +1208,8 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
sof_mailbox_read(sdev, time_info->llp_offset, &llp, sizeof(llp));
dai_cnt = ((u64)llp.reading.llp_u << 32) | llp.reading.llp_l;
}
+
+ dai_cnt = sof_ipc4_frames_dai_to_host(time_info, dai_cnt);
dai_cnt += time_info->stream_end_offset;
/* In two cases dai dma counter is not accurate
@@ -1200,8 +1243,9 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
dai_cnt -= time_info->stream_start_offset;
}
- /* Wrap the dai counter at the boundary where the host counter wraps */
- div64_u64_rem(dai_cnt, time_info->boundary, &dai_cnt);
+ /* Convert to a common base before comparisons */
+ dai_cnt &= DELAY_BOUNDARY;
+ host_cnt &= DELAY_BOUNDARY;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
head_cnt = host_cnt;
@@ -1211,14 +1255,11 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
tail_cnt = host_cnt;
}
- if (head_cnt < tail_cnt) {
- time_info->delay = time_info->boundary - tail_cnt + head_cnt;
- goto out;
- }
+ if (unlikely(head_cnt < tail_cnt))
+ time_info->delay = DELAY_BOUNDARY - tail_cnt + head_cnt;
+ else
+ time_info->delay = head_cnt - tail_cnt;
- time_info->delay = head_cnt - tail_cnt;
-
-out:
/*
* Convert the host byte counter to PCM pointer which wraps in buffer
* and it is in frames
The patch titled
Subject: mm/damon/core: use damos_commit_quota_goal() for new goal commit
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
mm-damon-core-use-damos_commit_quota_goal-for-new-goal-commit.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: SeongJae Park <sj(a)kernel.org>
Subject: mm/damon/core: use damos_commit_quota_goal() for new goal commit
Date: Mon, 13 Oct 2025 17:18:44 -0700
When damos_commit_quota_goals() is called to add new DAMOS quota goals
of the DAMOS_QUOTA_USER_INPUT metric, the current_value fields of the new
goals should also be set as requested.
However, damos_commit_quota_goals() is not updating the field for that
case, since it sets only the metric and target value using
damos_new_quota_goal(), and the metric-optional union fields using
damos_commit_quota_goal_union(). As a result, users could see that the
first current_value parameter committed online with a new quota goal is
ignored. Users are expected to keep committing the current_value for
DAMOS_QUOTA_USER_INPUT quota goals, since it is used as feedback, so the
real impact should be subtle. That said, this is obviously not intended
behavior.
Fix the issue by using damos_commit_quota_goal(), which sets all quota
goal parameters, instead of damos_commit_quota_goal_union(), which sets
only the union fields.
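Roughly, the difference described above, as a sketch using a simplified
stand-in struct (the real definitions live in include/linux/damon.h and
mm/damon/core.c; the struct and field names here are illustrative only):

struct goal {
	int metric;
	unsigned long target_value;
	unsigned long current_value;	/* user feedback for DAMOS_QUOTA_USER_INPUT */
	int nid;			/* metric-optional field */
};

/* copies only the metric-optional part */
static void commit_goal_union(struct goal *dst, const struct goal *src)
{
	dst->nid = src->nid;
}

/* copies every parameter, including current_value */
static void commit_goal(struct goal *dst, const struct goal *src)
{
	dst->metric = src->metric;
	dst->target_value = src->target_value;
	dst->current_value = src->current_value;
	commit_goal_union(dst, src);
}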
Link: https://lkml.kernel.org/r/20251014001846.279282-1-sj@kernel.org
Fixes: 1aef9df0ee90 ("mm/damon/core: commit damos_quota_goal->nid")
Signed-off-by: SeongJae Park <sj(a)kernel.org>
Cc: <stable(a)vger.kernel.org> [6.16+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/damon/core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/mm/damon/core.c~mm-damon-core-use-damos_commit_quota_goal-for-new-goal-commit
+++ a/mm/damon/core.c
@@ -835,7 +835,7 @@ int damos_commit_quota_goals(struct damo
src_goal->metric, src_goal->target_value);
if (!new_goal)
return -ENOMEM;
- damos_commit_quota_goal_union(new_goal, src_goal);
+ damos_commit_quota_goal(new_goal, src_goal);
damos_add_quota_goal(dst, new_goal);
}
return 0;
_
Patches currently in -mm which might be from sj(a)kernel.org are
mm-damon-sysfs-catch-commit-test-ctx-alloc-failure.patch
mm-damon-sysfs-dealloc-commit-test-ctx-always.patch
mm-damon-core-fix-list_add_tail-call-on-damon_call.patch
mm-damon-core-use-damos_commit_quota_goal-for-new-goal-commit.patch
mm-zswap-remove-unnecessary-dlen-writes-for-incompressible-pages.patch
mm-zswap-fix-typos-s-zwap-zswap.patch
mm-zswap-s-red-black-tree-xarray.patch
docs-admin-guide-mm-zswap-s-red-black-tree-xarray.patch
The patch titled
Subject: mm/damon/core: fix potential memory leak by cleaning ops_filter in damon_destroy_scheme
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
mm-damon-core-fix-potential-memory-leak-by-cleaning-ops_filter-in-damon_destroy_scheme.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: Enze Li <lienze(a)kylinos.cn>
Subject: mm/damon/core: fix potential memory leak by cleaning ops_filter in damon_destroy_scheme
Date: Tue, 14 Oct 2025 16:42:25 +0800
Currently, damon_destroy_scheme() only cleans up the filter list but
leaves ops_filter untouched, which could lead to memory leaks when a
scheme is destroyed.
This patch ensures both filter and ops_filter are properly freed in
damon_destroy_scheme(), preventing potential memory leaks.
Link: https://lkml.kernel.org/r/20251014084225.313313-1-lienze@kylinos.cn
Fixes: ab82e57981d0 ("mm/damon/core: introduce damos->ops_filters")
Signed-off-by: Enze Li <lienze(a)kylinos.cn>
Reviewed-by: SeongJae Park <sj(a)kernel.org>
Tested-by: SeongJae Park <sj(a)kernel.org>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/damon/core.c | 3 +++
1 file changed, 3 insertions(+)
--- a/mm/damon/core.c~mm-damon-core-fix-potential-memory-leak-by-cleaning-ops_filter-in-damon_destroy_scheme
+++ a/mm/damon/core.c
@@ -452,6 +452,9 @@ void damon_destroy_scheme(struct damos *
damos_for_each_filter_safe(f, next, s)
damos_destroy_filter(f);
+ damos_for_each_ops_filter_safe(f, next, s)
+ damos_destroy_filter(f);
+
kfree(s->migrate_dests.node_id_arr);
kfree(s->migrate_dests.weight_arr);
damon_del_scheme(s);
_
Patches currently in -mm which might be from lienze(a)kylinos.cn are
mm-damon-core-fix-potential-memory-leak-by-cleaning-ops_filter-in-damon_destroy_scheme.patch
The patch titled
Subject: vmw_balloon: indicate success when effectively deflating during migration
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
vmw_balloon-indicate-success-when-effectively-deflating-during-migration.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: David Hildenbrand <david(a)redhat.com>
Subject: vmw_balloon: indicate success when effectively deflating during migration
Date: Tue, 14 Oct 2025 14:44:55 +0200
When migrating a balloon page, we first deflate the old page to then
inflate the new page.
However, if inflating the new page fails, we effectively deflated the
old page, reducing the balloon size.
In that case, the migration actually worked: similar to migrating +
immediately deflating the new page. The old page will be freed back to
the buddy.
Right now, the core will leave the page marked as isolated (as we
returned an error). When later trying to putback that page, we will run
into the WARN_ON_ONCE() in balloon_page_putback().
That handling was changed in commit 3544c4faccb8 ("mm/balloon_compaction:
stop using __ClearPageMovable()"); before that change, we would have
tolerated that way of handling it.
To fix it, let's just return 0 in that case, making the core effectively
just clear the "isolated" flag and free it back to the buddy as if the
migration succeeded. Note that the new page will also get freed when the
core puts the last reference.
Note that this also makes it all more consistent: we will no longer
unisolate the page in the balloon driver while keeping it marked as
isolated in the migration core.
This was found by code inspection.
Link: https://lkml.kernel.org/r/20251014124455.478345-1-david@redhat.com
Fixes: 3544c4faccb8 ("mm/balloon_compaction: stop using __ClearPageMovable()")
Signed-off-by: David Hildenbrand <david(a)redhat.com>
Cc: Jerrin Shaji George <jerrin.shaji-george(a)broadcom.com>
Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list(a)broadcom.com>
Cc: Arnd Bergmann <arnd(a)arndb.de>
Cc: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
drivers/misc/vmw_balloon.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
--- a/drivers/misc/vmw_balloon.c~vmw_balloon-indicate-success-when-effectively-deflating-during-migration
+++ a/drivers/misc/vmw_balloon.c
@@ -1737,7 +1737,7 @@ static int vmballoon_migratepage(struct
{
unsigned long status, flags;
struct vmballoon *b;
- int ret;
+ int ret = 0;
b = container_of(b_dev_info, struct vmballoon, b_dev_info);
@@ -1796,17 +1796,15 @@ static int vmballoon_migratepage(struct
* A failure happened. While we can deflate the page we just
* inflated, this deflation can also encounter an error. Instead
* we will decrease the size of the balloon to reflect the
- * change and report failure.
+ * change.
*/
atomic64_dec(&b->size);
- ret = -EBUSY;
} else {
/*
* Success. Take a reference for the page, and we will add it to
* the list after acquiring the lock.
*/
get_page(newpage);
- ret = 0;
}
/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1815,7 @@ static int vmballoon_migratepage(struct
* If we succeed just insert it to the list and update the statistics
* under the lock.
*/
- if (!ret) {
+ if (status == VMW_BALLOON_SUCCESS) {
balloon_page_insert(&b->b_dev_info, newpage);
__count_vm_event(BALLOON_MIGRATE);
}
_
Patches currently in -mm which might be from david(a)redhat.com are
vmw_balloon-indicate-success-when-effectively-deflating-during-migration.patch
Using async-profiler
(https://github.com/async-profiler/async-profiler/) on Linux
6.17.1-arch1-1 causes a complete hang of the CPU. This has been
reported by many people at https://github.com/lucko/spark/issues/530.
spark is a piece of software that uses async-profiler internally.
As seen in https://github.com/lucko/spark/issues/530#issuecomment-3339974827,
this was bisected to commit 18dbcbfabfffc4a5d3ea10290c5ad27f22b0d240
("perf: Fix the POLL_HUP delivery breakage"). Reverting this commit on
6.17.1 fixed the issue for me.
Steps to reproduce:
1. Get a copy of async-profiler. I tested both v3 (affects older spark
versions) and v4.1 (latest at time of writing). Unarchive it; this is
<async-profiler-dir>.
2. Set kernel parameters kernel.perf_event_paranoid=1 and
kernel.kptr_restrict=0 as instructed by
https://github.com/async-profiler/async-profiler/blob/fb673227c7fb311f872ce…
3. Install a version of Java that comes with jshell, i.e. Java 9 or
newer. Note: jshell is used for ease of reproduction. Any Java
application that is actively running will work.
4. Run `printf 'int acc; while (true) { acc++; }' | jshell -`. This
will start an infinitely running Java process.
5. Run `jps` and take the PID next to the text RemoteExecutionControl
-- this is the process that was just started.
6. Attach async-profiler to this process by running
`<async-profiler-dir>/bin/asprof -d 1 <PID>`. This will run for one
second, then the system should freeze entirely shortly thereafter.
I triggered a sysrq crash while the system was frozen, and the output
I found in journalctl afterwards is at
https://gist.github.com/octylFractal/76611ee76060051e5efc0c898dd0949e
I'm not sure if that text is actually from the triggered crash, but it
seems relevant. If needed, please tell me how to get the actual crash
report; I'm not sure where it is.
I'm using an AMD Ryzen 9 5900X 12-Core Processor. Given that I've seen
no Intel reports, it may be AMD specific. I don't have an Intel CPU on
hand to test with.
/proc/version: Linux version 6.17.1-arch1-1 (linux@archlinux) (gcc
(GCC) 15.2.1 20250813, GNU ld (GNU Binutils) 2.45.0) #1 SMP
PREEMPT_DYNAMIC Mon, 06 Oct 2025 18:48:29 +0000
Operating System: Arch Linux
uname -mi: x86_64 unknown
For idpf:
Milena fixes a memory leak in the idpf reset logic when the driver resets
with an outstanding Tx timestamp.
For ixgbe and ixgbevf:
Jedrzej fixes an issue with reporting link speed on E610 VFs.
Jedrzej also fixes the VF mailbox API incompatibilities caused by the
confusion with API v1.4, v1.5, and v1.6. The v1.4 API introduced IPSEC
offload, but this was only supported on Linux hosts. The v1.5 API
introduced a new mailbox API which is necessary to resolve issues on ESX
hosts. The v1.6 API introduced a new link management API for E610. Jedrzej
introduces a new v1.7 API with feature negotiation, which enables properly
checking whether features such as IPSEC offload or the ESX mailbox APIs are
supported.
This resolves issues with compatibility on different hosts, and aligns the
API across hosts instead of having Linux require custom mailbox API
versions for IPSEC offload.
Koichiro fixes a KASAN use-after-free bug in ixgbe_remove().
Signed-off-by: Jacob Keller <jacob.e.keller(a)intel.com>
---
Changes in v3:
- Rebased and verified that compilation issues are resolved.
- Configured b4 to exclude undeliverable addresses.
- Link to v2: https://lore.kernel.org/r/20251003-jk-iwl-net-2025-10-01-v2-0-e59b4141c1b5@…
Changes in v2:
- Drop Emil's idpf_vport_open race fix for now.
- Add my signature.
- Link to v1: https://lore.kernel.org/r/20251001-jk-iwl-net-2025-10-01-v1-0-49fa99e86600@…
---
Jedrzej Jagielski (4):
ixgbevf: fix getting link speed data for E610 devices
ixgbe: handle IXGBE_VF_GET_PF_LINK_STATE mailbox operation
ixgbevf: fix mailbox API compatibility by negotiating supported features
ixgbe: handle IXGBE_VF_FEATURES_NEGOTIATE mbox cmd
Koichiro Den (1):
ixgbe: fix too early devlink_free() in ixgbe_remove()
Milena Olech (1):
idpf: cleanup remaining SKBs in PTP flows
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 15 ++
drivers/net/ethernet/intel/ixgbevf/defines.h | 1 +
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 7 +
drivers/net/ethernet/intel/ixgbevf/mbx.h | 8 +
drivers/net/ethernet/intel/ixgbevf/vf.h | 1 +
drivers/net/ethernet/intel/idpf/idpf_ptp.c | 3 +
.../net/ethernet/intel/idpf/idpf_virtchnl_ptp.c | 1 +
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3 +-
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 79 +++++++++
drivers/net/ethernet/intel/ixgbevf/ipsec.c | 10 ++
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 34 +++-
drivers/net/ethernet/intel/ixgbevf/vf.c | 182 +++++++++++++++++----
12 files changed, 310 insertions(+), 34 deletions(-)
---
base-commit: fea8cdf6738a8b25fccbb7b109b440795a0892cb
change-id: 20251001-jk-iwl-net-2025-10-01-92cd2a626ff7
Best regards,
--
Jacob Keller <jacob.e.keller(a)intel.com>
The patch titled
Subject: mm/damon/core: fix list_add_tail() call on damon_call()
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
mm-damon-core-fix-list_add_tail-call-on-damon_call.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days
------------------------------------------------------
From: SeongJae Park <sj(a)kernel.org>
Subject: mm/damon/core: fix list_add_tail() call on damon_call()
Date: Tue, 14 Oct 2025 13:59:36 -0700
Each damon_ctx maintains callback requests using a linked list
(damon_ctx->call_controls). When a new callback request is received via
damon_call(), the new request should be added to the list. However, the
function is making a mistake in the list_add_tail() invocation: it passes
the new item to add and the list head to add it before in the opposite order.
Because of the linked list manipulation implementation, the new request
can still be reached from the context's list head. But the list items
that were added before the new request are dropped from the list.
As a result, the callbacks are unexpectedly not invoked. Worse yet, if
the dropped callback requests were dynamically allocated, the memory is
leaked. Actually, the DAMON sysfs interface is using a dynamically
allocated repeat-mode callback request for automatic essential stats
update. And
because the online DAMON parameters commit is using a non-repeat-mode
callback request, the issue can easily be reproduced, like below.
# damo start --damos_action stat --refresh_stat 1s
# damo tune --damos_action stat --refresh_stat 1s
The first command dynamically allocates the repeat-mode callback request
for automatic essential stat update. Users can see the essential stats
are automatically updated for every second, using the sysfs interface.
The second command calls damon_commit() with a new callback request that
was made for the commit. As a result, the previously added repeat-mode
callback request is dropped from the list. The automatic stats refresh
stops working, and the memory for the repeat-mode callback request is
leaked. It can be confirmed using kmemleak.
Fix the mistake on the list_add_tail() call.
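For reference, list_add_tail() takes the entry being added as its first
argument and the list head to append it to as its second (as declared in
include/linux/list.h); a minimal annotated sketch of the two orderings,
using the names from the diff below:

	/* wrong: links the head into the new request's just-initialized
	 * list, so requests queued earlier are no longer reachable from
	 * ctx->call_controls */
	list_add_tail(&ctx->call_controls, &control->list);

	/* right: appends the new request to the context's list */
	list_add_tail(&control->list, &ctx->call_controls);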
Link: https://lkml.kernel.org/r/20251014205939.1206-1-sj@kernel.org
Fixes: 004ded6bee11 ("mm/damon: accept parallel damon_call() requests")
Signed-off-by: SeongJae Park <sj(a)kernel.org>
Cc: <stable(a)vger.kernel.org> [6.17+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/damon/core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/mm/damon/core.c~mm-damon-core-fix-list_add_tail-call-on-damon_call
+++ a/mm/damon/core.c
@@ -1450,7 +1450,7 @@ int damon_call(struct damon_ctx *ctx, st
INIT_LIST_HEAD(&control->list);
mutex_lock(&ctx->call_controls_lock);
- list_add_tail(&ctx->call_controls, &control->list);
+ list_add_tail(&control->list, &ctx->call_controls);
mutex_unlock(&ctx->call_controls_lock);
if (!damon_is_running(ctx))
return -EINVAL;
_
Patches currently in -mm which might be from sj(a)kernel.org are
mm-damon-sysfs-catch-commit-test-ctx-alloc-failure.patch
mm-damon-sysfs-dealloc-commit-test-ctx-always.patch
mm-damon-core-fix-list_add_tail-call-on-damon_call.patch
mm-zswap-remove-unnecessary-dlen-writes-for-incompressible-pages.patch
mm-zswap-fix-typos-s-zwap-zswap.patch
mm-zswap-s-red-black-tree-xarray.patch
docs-admin-guide-mm-zswap-s-red-black-tree-xarray.patch
We found that some discrete AMD graphics devices hide function 0, and the
whole device is not supposed to be probed.
Since our original purpose is to allow integrated devices (on bus 0) to
be probed without function 0, we can limit isolated function probing to
bus 0.
Cc: stable(a)vger.kernel.org
Fixes: a02fd05661d73a8 ("PCI: Extend isolated function probing to LoongArch")
Signed-off-by: Huacai Chen <chenhuacai(a)loongson.cn>
---
Resend to correct the subject line.
drivers/pci/probe.c | 2 +-
include/linux/hypervisor.h | 8 +++++---
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c83e75a0ec12..da6a2aef823a 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2883,7 +2883,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
* a hypervisor that passes through individual PCI
* functions.
*/
- if (!hypervisor_isolated_pci_functions())
+ if (!hypervisor_isolated_pci_functions(bus->number))
break;
}
fn = next_fn(bus, dev, fn);
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
index be5417303ecf..30ece04a16d9 100644
--- a/include/linux/hypervisor.h
+++ b/include/linux/hypervisor.h
@@ -32,13 +32,15 @@ static inline bool jailhouse_paravirt(void)
#endif /* !CONFIG_X86 */
-static inline bool hypervisor_isolated_pci_functions(void)
+static inline bool hypervisor_isolated_pci_functions(int bus)
{
if (IS_ENABLED(CONFIG_S390))
return true;
- if (IS_ENABLED(CONFIG_LOONGARCH))
- return true;
+ if (IS_ENABLED(CONFIG_LOONGARCH)) {
+ if (bus == 0)
+ return true;
+ }
return jailhouse_paravirt();
}
--
2.47.3
Each damon_ctx maintains callback requests using a linked list
(damon_ctx->call_controls). When a new callback request is received via
damon_call(), the new request should be added to the list. However, the
function is making a mistake at list_add_tail() invocation: putting the
new item to add and the list head to add it before, in the opposite
order. Because of the linked list manipulation implementation, the new
request can still be reached from the context's list head. But the list
items that were added before the new request are dropped from the list.
As a result, the callbacks are unexpectedly not invocated. Worse yet,
if the dropped callback requests were dynamically allocated, the memory
is leaked. Actually DAMON sysfs interface is using a dynamically
allocated repeat-mode callback request for automatic essential stats
update. And because the online DAMON parameters commit is using
a non-repeat-mode callback request, the issue can easily be reproduced,
like below.
# damo start --damos_action stat --refresh_stat 1s
# damo tune --damos_action stat --refresh_stat 1s
The first command dynamically allocates the repeat-mode callback request
for automatic essential stat update. Users can see the essential stats
are automatically updated for every second, using the sysfs interface.
The second command calls damon_commit() with a new callback request that
was made for the commit. As a result, the previously added repeat-mode
callback request is dropped from the list. The automatic stats refresh
stops working, and the memory for the repeat-mode callback request is
leaked. It can be confirmed using kmemleak.
Fix the mistake on the list_add_tail() call.
Fixes: 004ded6bee11 ("mm/damon: accept parallel damon_call() requests")
Cc: <stable(a)vger.kernel.org> # 6.17.x
Signed-off-by: SeongJae Park <sj(a)kernel.org>
---
mm/damon/core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 417f33a7868e..109b050c795a 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1453,7 +1453,7 @@ int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
INIT_LIST_HEAD(&control->list);
mutex_lock(&ctx->call_controls_lock);
- list_add_tail(&ctx->call_controls, &control->list);
+ list_add_tail(&control->list, &ctx->call_controls);
mutex_unlock(&ctx->call_controls_lock);
if (!damon_is_running(ctx))
return -EINVAL;
base-commit: 49926cfb24ad064c8c26f8652e8a61bbcde37701
--
2.47.3