This reverts commit ffd603f214237e250271162a5b325c6199a65382.
Commit ffd603f21423 ("usb: gadget: u_serial: Add null pointer check in
gs_start_io") adds null pointer checks at the beginning of the
gs_start_io() function to prevent a null pointer dereference. However,
these checks are redundant because the function's comment already
requires callers to hold the port_lock and ensure port.tty and port_usb
are not null. All existing callers already follow these rules.
The true cause of the null pointer dereference is a race condition: when
gs_start_io() calls gs_start_rx() or gs_start_tx(), the port_lock is
temporarily released around usb_ep_queue(), which allows port.tty and
port_usb to be cleared before the lock is re-acquired. Checks performed
once at the start of gs_start_io() cannot close that window.
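For reference, a minimal sketch of that window (illustrative only: the
structure below is simplified and not the driver's real gs_port layout;
the point is that any check must be redone after the lock is re-taken,
inside gs_start_rx()/gs_start_tx(), not once at gs_start_io() entry):

struct gs_port_sketch {
        spinlock_t      port_lock;
        void            *port_usb;      /* cleared by gserial_disconnect() under port_lock */
};

static int gs_start_rx_sketch(struct gs_port_sketch *port, struct usb_ep *ep,
                              struct usb_request *req)
{
        int status;

        /* Caller holds port_lock, but it must be dropped around usb_ep_queue(). */
        spin_unlock(&port->port_lock);
        status = usb_ep_queue(ep, req, GFP_ATOMIC);
        spin_lock(&port->port_lock);

        /*
         * While the lock was released, gserial_disconnect() may have cleared
         * port_usb (and port.tty) on another CPU, so the check has to happen
         * here, after re-acquiring the lock.
         */
        if (!port->port_usb)
                return -EIO;

        return status;
}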
Cc: stable(a)vger.kernel.org
Fixes: ffd603f21423 ("usb: gadget: u_serial: Add null pointer check in gs_start_io")
Signed-off-by: Kuen-Han Tsai <khtsai(a)google.com>
---
drivers/usb/gadget/function/u_serial.c | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index ab544f6824be..c043bdc30d8a 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -544,20 +544,16 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
static int gs_start_io(struct gs_port *port)
{
struct list_head *head = &port->read_pool;
- struct usb_ep *ep;
+ struct usb_ep *ep = port->port_usb->out;
int status;
unsigned started;
- if (!port->port_usb || !port->port.tty)
- return -EIO;
-
/* Allocate RX and TX I/O buffers. We can't easily do this much
* earlier (with GFP_KERNEL) because the requests are coupled to
* endpoints, as are the packet sizes we'll be using. Different
* configurations may use different endpoints with a given port;
* and high speed vs full speed changes packet sizes too.
*/
- ep = port->port_usb->out;
status = gs_alloc_requests(ep, head, gs_read_complete,
&port->read_allocated);
if (status)
--
2.50.0.rc2.692.g299adb8693-goog
Commit 704d3d60fec4 ("drm/etnaviv: don't block scheduler when GPU is still
active") ensured that active jobs are returned to the pending list when
extending the timeout. However, it manipulated the list without taking the
pending list's lock, which opens a race with the scheduler's workqueues,
which are still running and may access the list concurrently.
Hold the lock while manipulating the scheduler's pending list to prevent
a race.
Cc: stable(a)vger.kernel.org
Fixes: 704d3d60fec4 ("drm/etnaviv: don't block scheduler when GPU is still active")
Signed-off-by: Maíra Canal <mcanal(a)igalia.com>
---
Hi,
I'm proposing this workaround patch to address the race condition caused
by manipulating the pending list without holding its lock. Although I
understand this isn't a complete solution (see [1]), it isn't reasonable
to backport the new DRM stat series [2] to the stable branches.
Therefore, I believe the best solution is to backport this fix to the
stable branches, which fixes the race while still adding the job back to
the pending list (which avoids most memory leaks).
[1] https://lore.kernel.org/dri-devel/bcc0ed477f8a6f3bb06665b1756bcb98fb7af871.…
[2] https://lore.kernel.org/dri-devel/20250530-sched-skip-reset-v2-0-c40a8d2d8d…
Best Regards,
- Maíra
---
drivers/gpu/drm/etnaviv/etnaviv_sched.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 76a3a3e517d8..71e2e6b9d713 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -35,6 +35,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
*sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ struct drm_gpu_scheduler *sched = sched_job->sched;
struct etnaviv_gpu *gpu = submit->gpu;
u32 dma_addr, primid = 0;
int change;
@@ -89,7 +90,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
return DRM_GPU_SCHED_STAT_NOMINAL;
out_no_timeout:
- list_add(&sched_job->list, &sched_job->sched->pending_list);
+ spin_lock(&sched->job_list_lock);
+ list_add(&sched_job->list, &sched->pending_list);
+ spin_unlock(&sched->job_list_lock);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
--
2.49.0
The patch titled
Subject: maple_tree: fix MA_STATE_PREALLOC flag in mas_preallocate()
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
maple_tree-fix-ma_state_prealloc-flag-in-mas_preallocate.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: "Liam R. Howlett" <Liam.Howlett(a)oracle.com>
Subject: maple_tree: fix MA_STATE_PREALLOC flag in mas_preallocate()
Date: Mon, 16 Jun 2025 14:45:20 -0400
Temporarily clear the preallocation flag when explicitly requesting
allocations. Pre-existing allocations are already counted against the
request through mas_node_count_gfp(), but the allocations will not happen
if the MA_STATE_PREALLOC flag is set. This flag is meant to avoid
re-allocating in bulk allocation mode, and to detect issues with
preallocation calculations.
The MA_STATE_PREALLOC flag should also always be set on zero allocations
so that detection of underflow allocations will print a WARN_ON() during
consumption.
The user-visible effect of this flaw is a WARN_ON() followed by a null
pointer dereference when a subsequent request for a larger number of nodes
is ignored, such as the vma merge retry in mmap_region() caused by drivers
altering the vma flags (which happens in v6.6, at least).
Link: https://lkml.kernel.org/r/20250616184521.3382795-3-Liam.Howlett@oracle.com
Fixes: 54a611b605901 ("Maple Tree: add new data structure")
Signed-off-by: Liam R. Howlett <Liam.Howlett(a)oracle.com>
Reported-by: Zhaoyang Huang <zhaoyang.huang(a)unisoc.com>
Reported-by: Hailong Liu <hailong.liu(a)oppo.com>
Link: https://lore.kernel.org/all/1652f7eb-a51b-4fee-8058-c73af63bacd1@oppo.com/
Link: https://lore.kernel.org/all/20250428184058.1416274-1-Liam.Howlett@oracle.co…
Link: https://lore.kernel.org/all/20250429014754.1479118-1-Liam.Howlett@oracle.co…
Cc: Lorenzo Stoakes <lorenzo.stoakes(a)oracle.com>
Cc: Suren Baghdasaryan <surenb(a)google.com>
Cc: Hailong Liu <hailong.liu(a)oppo.com>
Cc: zhangpeng.00(a)bytedance.com <zhangpeng.00(a)bytedance.com>
Cc: Steve Kang <Steve.Kang(a)unisoc.com>
Cc: Matthew Wilcox <willy(a)infradead.org>
Cc: Sidhartha Kumar <sidhartha.kumar(a)oracle.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
lib/maple_tree.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
--- a/lib/maple_tree.c~maple_tree-fix-ma_state_prealloc-flag-in-mas_preallocate
+++ a/lib/maple_tree.c
@@ -5527,8 +5527,9 @@ int mas_preallocate(struct ma_state *mas
mas->store_type = mas_wr_store_type(&wr_mas);
request = mas_prealloc_calc(&wr_mas, entry);
if (!request)
- return ret;
+ goto set_flag;
+ mas->mas_flags &= ~MA_STATE_PREALLOC;
mas_node_count_gfp(mas, request, gfp);
if (mas_is_err(mas)) {
mas_set_alloc_req(mas, 0);
@@ -5538,6 +5539,7 @@ int mas_preallocate(struct ma_state *mas
return ret;
}
+set_flag:
mas->mas_flags |= MA_STATE_PREALLOC;
return ret;
}
_
Patches currently in -mm which might be from Liam.Howlett(a)oracle.com are
maple_tree-fix-ma_state_prealloc-flag-in-mas_preallocate.patch
testing-raix-tree-maple-increase-readers-and-reduce-delay-for-faster-machines.patch
From: Ashish Kalra <ashish.kalra(a)amd.com>
Panic notifiers are invoked with the RCU read lock held. When the SNP
panic notifier tries to unregister itself from within its own panic
notifier callback, it deadlocks, because notifier unregistration performs
RCU synchronization.
Code flow for SNP panic notifier:
snp_shutdown_on_panic() ->
__sev_firmware_shutdown() ->
__sev_snp_shutdown_locked() ->
atomic_notifier_chain_unregister(.., &snp_panic_notifier)
Fix this by having the SNP panic notifier unregister itself during SNP
shutdown only if a panic is not in progress.
Reviewed-by: Tom Lendacky <thomas.lendacky(a)amd.com>
Fixes: 19860c3274fb ("crypto: ccp - Register SNP panic notifier only if SNP is enabled")
Signed-off-by: Ashish Kalra <ashish.kalra(a)amd.com>
---
drivers/crypto/ccp/sev-dev.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 8fb94c5f006a..17edc6bf5622 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1787,8 +1787,14 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
sev->snp_initialized = false;
dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &snp_panic_notifier);
+ /*
+ * __sev_snp_shutdown_locked() deadlocks when it tries to unregister
+ * itself during panic as the panic notifier is called with RCU read
+ * lock held and notifier unregistration does RCU synchronization.
+ */
+ if (!panic)
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &snp_panic_notifier);
/* Reset TMR size back to default */
sev_es_tmr_size = SEV_TMR_SIZE;
--
2.34.1
When the link goes down and comes up, FDMI requests are not sent out
anymore.
Fix this by clearing FNIC_FDMI_ACTIVE when the link goes down.
Fixes: 09c1e6ab4ab2 ("scsi: fnic: Add and integrate support for FDMI")
Reviewed-by: Sesidhar Baddela <sebaddel(a)cisco.com>
Reviewed-by: Arulprabhu Ponnusamy <arulponn(a)cisco.com>
Reviewed-by: Gian Carlo Boffa <gcboffa(a)cisco.com>
Reviewed-by: Arun Easi <aeasi(a)cisco.com>
Tested-by: Karan Tilak Kumar <kartilak(a)cisco.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Karan Tilak Kumar <kartilak(a)cisco.com>
---
Changes between v3 and v4:
- Incorporate review comments from Dan:
- Remove comments from Cc tag
Changes between v2 and v3:
- Incorporate review comments from Dan:
- Add Cc to stable
Changes between v1 and v2:
- Incorporate review comments from Dan:
- Add Fixes tag
---
drivers/scsi/fnic/fdls_disc.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c
index 9e9939d41fa8..14691db4d5f9 100644
--- a/drivers/scsi/fnic/fdls_disc.c
+++ b/drivers/scsi/fnic/fdls_disc.c
@@ -5078,9 +5078,12 @@ void fnic_fdls_link_down(struct fnic_iport_s *iport)
fdls_delete_tport(iport, tport);
}
- if ((fnic_fdmi_support == 1) && (iport->fabric.fdmi_pending > 0)) {
- timer_delete_sync(&iport->fabric.fdmi_timer);
- iport->fabric.fdmi_pending = 0;
+ if (fnic_fdmi_support == 1) {
+ if (iport->fabric.fdmi_pending > 0) {
+ timer_delete_sync(&iport->fabric.fdmi_timer);
+ iport->fabric.fdmi_pending = 0;
+ }
+ iport->flags &= ~FNIC_FDMI_ACTIVE;
}
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
--
2.47.1
commit 270aa010620697fb27b8f892cc4e194bc2b7d134 upstream.
Patch series "mm/uffd: Fix vma merge/split", v2.
This series contains two patches that fix vma merge/split for userfaultfd
on two separate issues.
Patch 1 fixes a regression present since 6.1+ due to something we
overlooked when converting to the maple tree APIs. The plan is to use
patch 1 to replace the commit "2f628010799e (mm: userfaultfd: avoid
passing an invalid range to vma_merge())" in the mm-hotfixes-unstable
tree if possible, so as to bring the uffd vma operations back in line
with the rest of the code.
Patch 2 fixes a long-standing issue where a vma can be left unmerged,
even when merging is possible, for either uffd register or unregister.
Many thanks to Lorenzo for noticing this issue from the assert movement
patch, looking into the problem, and also providing a reproducer for the
unmerged vma issue [1].
[1] https://gist.github.com/lorenzo-stoakes/a11a10f5f479e7a977fc456331266e0e
This patch (of 2):
It seems vma merging with the uffd paths is broken for both register and
unregister: right now we can feed wrong parameters to vma_merge(), which
was found by a recent patch from Lorenzo Stoakes that moved the asserts
upwards in vma_merge():
https://lore.kernel.org/all/ZFunF7DmMdK05MoF@FVFF77S0Q05N.cambridge.arm.com/
It's possible that "start" is contained within vma but not clamped to its
start. We need to convert this into either the "cannot merge" case or
"can merge" case 4, which permits subdivision of prev by assigning vma to
prev. As we loop, each subsequent VMA will be clamped to the start.
This patch eliminates the report and makes sure the vma_merge() calls
become legal again.
One thing to mention is that the "Fixes: 29417d292bd0" below is there only
to help explain where the warning can start to trigger; the real commit to
fix is 69dbe6daf104. Commit 29417d292bd0 merely helps us identify the
issue, but we may want to keep it in Fixes as well to ease tracking for
kernel backporters.
Link: https://lkml.kernel.org/r/20230517190916.3429499-1-peterx@redhat.com
Link: https://lkml.kernel.org/r/20230517190916.3429499-2-peterx@redhat.com
Fixes: 69dbe6daf104 ("userfaultfd: use maple tree iterator to iterate VMAs")
Signed-off-by: Peter Xu <peterx(a)redhat.com>
Reported-by: Mark Rutland <mark.rutland(a)arm.com>
Reviewed-by: Lorenzo Stoakes <lstoakes(a)gmail.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett(a)oracle.com>
Closes: https://lore.kernel.org/all/ZFunF7DmMdK05MoF@FVFF77S0Q05N.cambridge.arm.com/
Cc: Lorenzo Stoakes <lstoakes(a)gmail.com>
Cc: Mike Rapoport (IBM) <rppt(a)kernel.org>
Cc: Liam R. Howlett <Liam.Howlett(a)oracle.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Jakub Acs <acsjakub(a)amazon.com>
[acsjakub: contextual change - keep call to mas_next()]
Cc: linux-mm(a)kvack.org
---
This backport fixes a security issue: a dangling pointer to a VMA in the
maple tree. I'm omitting the details in this message for brevity, but I'm
happy to provide them if requested.
Since the original cover letter mentions that the series fixes two
separate issues, I hope the patch is acceptable on its own?
fs/userfaultfd.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 82101a2cf933..fcf96f52b2e9 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1426,6 +1426,9 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
if (prev != vma)
mas_next(&mas, ULONG_MAX);
+ if (vma->vm_start < start)
+ prev = vma;
+
ret = 0;
do {
cond_resched();
@@ -1603,6 +1606,9 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
if (prev != vma)
mas_next(&mas, ULONG_MAX);
+ if (vma->vm_start < start)
+ prev = vma;
+
ret = 0;
do {
cond_resched();
--
2.47.1
Currently the 'pispbe_schedule()' function does two things:
1) Tries to assemble a job by inspecting all the video node queues
to make sure all the required buffers are available
2) Submits the job to the hardware
The pispbe_schedule() function is called at:
- video device start_streaming() time
- video device qbuf() time
- irq handler
As assembling a job requires inspecting all queues, it is a rather
time-consuming operation which is better not run in IRQ context.
To avoid executing the time-consuming job creation in interrupt
context, split job creation and job scheduling into two distinct
operations. When a well-formed job is created, append it to the
newly introduced 'pispbe->job_queue', from which the scheduling
routine will dequeue it.
At start_streaming() and qbuf() time, immediately try to schedule a job
if one has been created, as the IRQ handler is only called when a job
has completed and cannot be relied upon on its own for scheduling new
jobs.
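As a rough sketch of the split (only 'pispbe->job_queue',
pispbe_prepare_job() and pispbe_schedule() come from this cover letter;
the structure layout, lock name and submit helper below are assumptions,
not the driver's actual code):

struct pispbe_dev_sketch {
        spinlock_t hw_lock;
        struct list_head job_queue;
};

struct pispbe_job_sketch {
        struct list_head queue;
        /* per-job buffers and config */
};

/* Process context (start_streaming()/qbuf()): assemble and queue a job. */
static void pispbe_prepare_job_sketch(struct pispbe_dev_sketch *pispbe,
                                      struct pispbe_job_sketch *job)
{
        spin_lock_irq(&pispbe->hw_lock);        /* races with the IRQ handler */
        list_add_tail(&job->queue, &pispbe->job_queue);
        spin_unlock_irq(&pispbe->hw_lock);
}

/* IRQ handler (and right after queuing a job): dequeue and submit. */
static void pispbe_schedule_sketch(struct pispbe_dev_sketch *pispbe)
{
        struct pispbe_job_sketch *job;
        unsigned long flags;

        spin_lock_irqsave(&pispbe->hw_lock, flags);
        job = list_first_entry_or_null(&pispbe->job_queue,
                                       struct pispbe_job_sketch, queue);
        if (job)
                list_del(&job->queue);
        spin_unlock_irqrestore(&pispbe->hw_lock, flags);

        if (job)
                pispbe_submit_to_hw_sketch(pispbe, job);        /* hypothetical submit helper */
}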
Signed-off-by: Jacopo Mondi <jacopo.mondi(a)ideasonboard.com>
---
Changes in v7:
- Rebased on media-committers/next
- Fix lockdep warning by using the proper spin_lock_irq() primitive in
pispbe_prepare_job() which can race with the IRQ handler
- Link to v6: https://lore.kernel.org/r/20240930-pispbe-mainline-split-jobs-handling-v6-v…
v5->v6:
- Make the driver depend on PM
- Simplify the probe() routine by using pm_runtime_
- Remove suspend call from remove()
v4->v5:
- Use appropriate locking constructs:
- spin_lock_irq() for pispbe_prepare_job() called from non irq context
- spin_lock_irqsave() for pispbe_schedule() called from irq context
- Remove hw_lock from ready_queue accesses in stop_streaming and
start_streaming
- Fix trivial indentation mistake in 4/4
v3->v4:
- Expand commit message in 2/4 to explain why removing validation in schedule()
is safe
- Drop ready_lock spinlock
- Use the non-_irqsave version of scoped_guard(spinlock, ...)
- Support !CONFIG_PM in 4/4 by calling the enable/disable routines directly
and adjust pm_runtime usage as suggested by Laurent
v2->v3:
- Mark pispbe_runtime_resume() as __maybe_unused
- Add fixes tags where appropriate
v1->v2:
- Add two patches to address Laurent's comments separately
- use scoped_guard() when possible
- Add patch to fix runtime_pm imbalance
---
Jacopo Mondi (4):
media: pisp_be: Drop reference to non-existing function
media: pisp_be: Remove config validation from schedule()
media: pisp_be: Split jobs creation and scheduling
media: pisp_be: Fix pm_runtime underrun in probe
drivers/media/platform/raspberrypi/pisp_be/Kconfig | 1 +
.../media/platform/raspberrypi/pisp_be/pisp_be.c | 187 ++++++++++-----------
2 files changed, 90 insertions(+), 98 deletions(-)
---
base-commit: 5e1ff2314797bf53636468a97719a8222deca9ae
change-id: 20240930-pispbe-mainline-split-jobs-handling-v6-15dc16e11e3a
Best regards,
--
Jacopo Mondi <jacopo.mondi(a)ideasonboard.com>
A buffer overflow vulnerability exists in the USB 9pfs transport layer
where inconsistent size validation between packet header parsing and
actual data copying allows a malicious USB host to overflow heap buffers.
The issue occurs because:
- usb9pfs_rx_header() validates only the declared size in packet header
- usb9pfs_rx_complete() uses req->actual (actual received bytes) for memcpy
This allows an attacker to craft packets with small declared size (bypassing
validation) but large actual payload (triggering overflow in memcpy).
Add validation in usb9pfs_rx_complete() to ensure req->actual does not
exceed the buffer capacity before copying data.
Reported-by: Yuhao Jiang <danisjiang(a)gmail.com>
Fixes: a3be076dc174 ("net/9p/usbg: Add new usb gadget function transport")
Cc: stable(a)vger.kernel.org
Signed-off-by: Yuhao Jiang <danisjiang(a)gmail.com>
---
net/9p/trans_usbg.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/net/9p/trans_usbg.c b/net/9p/trans_usbg.c
index 6b694f117aef..047a2862fc84 100644
--- a/net/9p/trans_usbg.c
+++ b/net/9p/trans_usbg.c
@@ -242,6 +242,15 @@ static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req)
if (!p9_rx_req)
return;
+ /* Validate actual received size against buffer capacity */
+ if (req->actual > p9_rx_req->rc.capacity) {
+ dev_err(&cdev->gadget->dev,
+ "received data size %u exceeds buffer capacity %zu\n",
+ req->actual, p9_rx_req->rc.capacity);
+ p9_req_put(usb9pfs->client, p9_rx_req);
+ return;
+ }
+
memcpy(p9_rx_req->rc.sdata, req->buf, req->actual);
p9_rx_req->rc.size = req->actual;
--
2.43.0
Henry's bug[1] and fix[2] prompted some further inspection by
Jean.
This series provides fixes for the remaining issues Jean identified, as
well as reworking the channel paths to reduce cleanup required in error
paths. It is based on v6.16-rc1.
Lightly tested under qemu and on an AST2600 EVB. Further testing on
platforms designed around the snoop device appreciated.
[1]: https://bugzilla.kernel.org/show_bug.cgi?id=219934
[2]: https://lore.kernel.org/all/20250401074647.21300-1-bsdhenrymartin@gmail.com/
Signed-off-by: Andrew Jeffery <andrew(a)codeconstruct.com.au>
---
Changes in v2:
- Address comments on v1 from Jean
- Implement channel indexing using enums to avoid unnecessary tests
- Switch to devm_clk_get_enabled()
- Use dev_err_probe() where possible
- Link to v1: https://patch.msgid.link/20250411-aspeed-lpc-snoop-fixes-v1-0-64f522e3ad6f@…
---
Andrew Jeffery (10):
soc: aspeed: lpc-snoop: Cleanup resources in stack-order
soc: aspeed: lpc-snoop: Don't disable channels that aren't enabled
soc: aspeed: lpc-snoop: Ensure model_data is valid
soc: aspeed: lpc-snoop: Constrain parameters in channel paths
soc: aspeed: lpc-snoop: Rename 'channel' to 'index' in channel paths
soc: aspeed: lpc-snoop: Rearrange channel paths
soc: aspeed: lpc-snoop: Switch to devm_clk_get_enabled()
soc: aspeed: lpc-snoop: Use dev_err_probe() where possible
soc: aspeed: lpc-snoop: Consolidate channel initialisation
soc: aspeed: lpc-snoop: Lift channel config to const structs
drivers/soc/aspeed/aspeed-lpc-snoop.c | 224 +++++++++++++++++-----------------
1 file changed, 110 insertions(+), 114 deletions(-)
---
base-commit: 19272b37aa4f83ca52bdf9c16d5d81bdd1354494
change-id: 20250401-aspeed-lpc-snoop-fixes-e5d2883da3a3
Best regards,
--
Andrew Jeffery <andrew(a)codeconstruct.com.au>
Use the common wrappers operating directly on struct sg_table objects to
fix incorrect use of scatterlist-related calls. The dma_unmap_sg()
function has to be called with the number of elements originally passed
to dma_map_sg(), not the count returned in the sg_table's nents.
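For context, the wrapper usage looks roughly like this (a sketch with a
generic dev/sgt pair; dma_map_sgtable() returns 0 or a negative errno and
records the mapped count in sgt->nents, while dma_unmap_sgtable() uses
sgt->orig_nents internally):

        int ret;

        ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        if (ret)
                return ret;     /* 0 on success, negative errno on failure */

        /* ... program the DMA engine from sgt->sgl over sgt->nents entries ... */

        dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);  /* unmaps sgt->orig_nents entries */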
CC: stable(a)vger.kernel.org
Fixes: 425902f5c8e3 ("fpga zynq: Use the scatterlist interface")
Signed-off-by: Marek Szyprowski <m.szyprowski(a)samsung.com>
---
v2:
- fixed build break (missing flags parameter)
---
drivers/fpga/zynq-fpga.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git drivers/fpga/zynq-fpga.c drivers/fpga/zynq-fpga.c
index f7e08f7ea9ef..0be0d569589d 100644
--- drivers/fpga/zynq-fpga.c
+++ drivers/fpga/zynq-fpga.c
@@ -406,7 +406,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
}
priv->dma_nelms =
- dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
if (priv->dma_nelms == 0) {
dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
return -ENOMEM;
@@ -478,7 +478,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
clk_disable(priv->clk);
out_free:
- dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ dma_unmap_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
return err;
}
--
2.34.1
On Sun, 15 Jun 2025, "Nautiyal, Ankit K" <ankit.k.nautiyal(a)intel.com> wrote:
> On 6/13/2025 3:06 PM, Jani Nikula wrote:
>> On Fri, 13 Jun 2025, Ankit Nautiyal<ankit.k.nautiyal(a)intel.com> wrote:
>>> *ana_cp_int = max(1, min(ana_cp_int_temp, 127));
>> Unrelated to this patch, but this should be:
>>
>> *ana_cp_int = clamp(ana_cp_int_temp, 1, 127);
>>
>> There's a similar issue with ana_cp_prop also in the file.
>>
> Agreed. Should there be a separate patch for this?
Yes. That's why I emphasized "unrelated to this patch". ;)
BR,
Jani.
--
Jani Nikula, Intel
Use the common wrappers operating directly on struct sg_table objects to
fix incorrect use of scatterlist-related calls. The dma_unmap_sg()
function has to be called with the number of elements originally passed
to dma_map_sg(), not the count returned in the sg_table's nents.
CC: stable(a)vger.kernel.org
Fixes: 425902f5c8e3 ("fpga zynq: Use the scatterlist interface")
Signed-off-by: Marek Szyprowski <m.szyprowski(a)samsung.com>
---
drivers/fpga/zynq-fpga.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index f7e08f7ea9ef..9bd39d1d4048 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -406,7 +406,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
}
priv->dma_nelms =
- dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE);
if (priv->dma_nelms == 0) {
dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
return -ENOMEM;
@@ -478,7 +478,7 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
clk_disable(priv->clk);
out_free:
- dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ dma_unmap_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE);
return err;
}
--
2.34.1
This patch series attempts to enable the use of the xe DRM driver on
non-4KiB kernel page platforms. This involves fixing the ttm/bo interface,
as well as parts of the userspace API, to use the kernel `PAGE_SIZE' for
alignment instead of the assumed `SZ_4K'. It also fixes incorrect usage of
`PAGE_SIZE' in the GuC and ring buffer interface code to make sure all
instructions/commands are aligned to 4KiB boundaries (per the Programmer's
Manual for the GPUs covered by this DRM driver).
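As a hedged sketch of the rule the series applies (GUC_ALIGN is the name
introduced in v2 per the changelog below; the helpers themselves are
illustrative, not the actual patch content):

/* GuC and ring objects keep their hardware-mandated 4 KiB alignment. */
#define GUC_ALIGN       SZ_4K

static inline u64 guc_offset_align_sketch(u64 offset)
{
        return ALIGN(offset, GUC_ALIGN);        /* fixed 4 KiB, per the hardware spec */
}

/*
 * CPU-visible buffer object alignment, and the minimum alignment reported
 * to userspace, follow the kernel page size instead (4, 16 or 64 KiB).
 */
static inline u64 bo_size_align_sketch(u64 size)
{
        return ALIGN(size, PAGE_SIZE);
}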
This issue was first discovered and reported by members of the LoongArch
user communities, whose hardware commonly ran on 16KiB-page kernels. The
patch series began on an unassuming branch of a downstream kernel tree
maintained by Shang Yatsen.[^1]
It worked well but remained sparsely documented; a lot of the work done
here relies on Shang Yatsen's original patch.
AOSC OS then picked it up[^2] to provide Intel Xe/Arc support for users of
its LoongArch port, which I worked on extensively. After months of
positive user feedback and encouragement from Kexy Biscuit, my colleague
in the community, I decided to examine its potential for upstreaming,
cross-referencing kernel and Intel documentation to better document and
revise this patch.
This series has now tested well for boot-up, OpenGL, and playback of a
standardised set of video samples[^3] on the following platforms
(motherboard + GPU model):
- x86-64, 4KiB kernel page:
- MS-7D42 + Intel Arc A580
- COLORFIRE B760M-MEOW WIFI D5 + Intel Arc B580
- LoongArch, 16KiB kernel page:
- XA61200 + GUNNIR DG1 Blue Halberd (Intel DG1)
- XA61200 + GUNNIR Iris Xe Index 4 (Intel DG1)
- XA61200 + GUNNIR Intel Iris Xe Max Index V2 (Intel DG1)
- XA61200 + GUNNIR Intel Arc A380 Index 6G (Intel Arc A380)
- XA61200 + ASRock Arc A380 Challenger ITX OC (Intel Arc A380)
- XA61200 + Intel Arc A580
- XA61200 + GUNNIR Intel Arc A750 Photon 8G OC (Intel Arc A750)
- XA61200 + Intel Arc B580
- XB612B0 + GUNNIR Intel Iris Xe Max Index V2 (Intel DG1)
- XB612B0 + GUNNIR Intel Arc A380 Index 6G (Intel Arc A380)
- ASUS XC-LS3A6M + GUNNIR Intel Arc B580 INDEX 12G (Intel Arc B580)
On the following platforms, basic functionality tested well, but the
driver was unstable, with occasional resets (I suspect, however, that
these platforms suffer from PCIe coherence issues, as the instability only
occurs under heavy VRAM I/O load):
- AArch64, 4KiB/64KiB kernel pages:
- ERUN-FD3000 (Phytium D3000) + GUNNIR Intel Iris Xe Max Index V2
(Intel DG1)
- ERUN-FD3000 (Phytium D3000) + GUNNIR Intel Arc A380 Index 6G
(Intel Arc A380)
- ERUN-FD3000 (Phytium D3000) + GUNNIR Intel Arc A750 Photon 8G OC
(Intel Arc A750)
I think this patch series is now ready for your comments and review.
Please forgive me if I have made any simple mistakes or used the wrong
terminology; I have never worked on a patch for the DRM subsystem before
and my experience is still quite thin.
But anyway, just letting you all know that Intel Xe/Arc works on non-4KiB
kernel page platforms (and honestly, it's great to use, especially for
games and media playback)!
[^1]: https://github.com/FanFansfan/loongson-linux/tree/loongarch-xe
[^2]: We maintained Shang Yatsen's patch until our v6.13.3 tree, until
we decided to test and send this series upstream,
https://github.com/AOSC-Tracking/linux/tree/aosc/v6.13.3
[^3]: Delicious hot pot!
https://repo.aosc.io/ahvl/sample-videos-20250223.tar.zst
---
Matthew(s), Lucas, and Francois:
Thanks again for your patience and review.
I recently had a job change and it put me off this series for months, but
I'm back (and should be a lot more responsive now) - sorry! Let's get this
ball rolling again.
I was unfortunately unable to revise 1/5 from v1 as you requested;
neither of your suggestions to allow allocation of VRAM smaller than the
page size worked, so I kept that part as is.
As for your comment on 5/5, I'm not sure what the right approach to
implementing a SZ_64K >= PAGE_SIZE assert would be, as there are many
other instances of similar ternary conditional operators in the xe code.
Correct me if I'm wrong, but I felt that it might be better handled in a
separate patch series?
---
Changes in v2:
- Define `GUC_ALIGN' and use them in GuC code to improve clarity.
- Update documentation on `DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT'.
- Rebase, and other minor changes.
- Link to v1:
https://lore.kernel.org/all/20250226-xe-non-4k-fix-v1-0-80f23b5ee40e@aosc.i…
To: Lucas De Marchi <lucas.demarchi(a)intel.com>
To: Thomas Hellström <thomas.hellstrom(a)linux.intel.com>
To: Rodrigo Vivi <rodrigo.vivi(a)intel.com>
To: Maarten Lankhorst <maarten.lankhorst(a)linux.intel.com>
To: Maxime Ripard <mripard(a)kernel.org>
To: Thomas Zimmermann <tzimmermann(a)suse.de>
To: David Airlie <airlied(a)gmail.com>
To: Simona Vetter <simona(a)ffwll.ch>
To: José Roberto de Souza <jose.souza(a)intel.com>
To: Francois Dugast <francois.dugast(a)intel.com>
To: Matthew Brost <matthew.brost(a)intel.com>
To: Alan Previn <alan.previn.teres.alexis(a)intel.com>
To: Zhanjun Dong <zhanjun.dong(a)intel.com>
To: Matt Roper <matthew.d.roper(a)intel.com>
To: Mateusz Naklicki <mateusz.naklicki(a)intel.com>
Cc: Mauro Carvalho Chehab <mauro.chehab(a)linux.intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski(a)intel.com>
Cc: intel-xe(a)lists.freedesktop.org
Cc: dri-devel(a)lists.freedesktop.org
Cc: linux-kernel(a)vger.kernel.org
Suggested-by: Kexy Biscuit <kexybiscuit(a)aosc.io>
Co-developed-by: Shang Yatsen <429839446(a)qq.com>
Signed-off-by: Shang Yatsen <429839446(a)qq.com>
Signed-off-by: Mingcong Bai <jeffbai(a)aosc.io>
---
Mingcong Bai (5):
drm/xe/bo: fix alignment with non-4KiB kernel page sizes
drm/xe/guc: use GUC_SIZE (SZ_4K) for alignment
drm/xe/regs: fix RING_CTL_SIZE(size) calculation
drm/xe: use 4KiB alignment for cursor jumps
drm/xe/query: use PAGE_SIZE as the minimum page alignment
drivers/gpu/drm/xe/regs/xe_engine_regs.h | 2 +-
drivers/gpu/drm/xe/xe_bo.c | 8 ++++----
drivers/gpu/drm/xe/xe_guc.c | 4 ++--
drivers/gpu/drm/xe/xe_guc.h | 3 +++
drivers/gpu/drm/xe/xe_guc_ads.c | 32 ++++++++++++++++----------------
drivers/gpu/drm/xe/xe_guc_capture.c | 8 ++++----
drivers/gpu/drm/xe/xe_guc_ct.c | 2 +-
drivers/gpu/drm/xe/xe_guc_log.c | 5 +++--
drivers/gpu/drm/xe/xe_guc_pc.c | 4 ++--
drivers/gpu/drm/xe/xe_migrate.c | 4 ++--
drivers/gpu/drm/xe/xe_query.c | 2 +-
include/uapi/drm/xe_drm.h | 7 +++++--
12 files changed, 44 insertions(+), 37 deletions(-)
---
base-commit: 546b1c9e93c2bb8cf5ed24e0be1c86bb089b3253
change-id: 20250603-upstream-xe-non-4k-v2-4acf253c9bfd
Best regards,
--
Mingcong Bai <jeffbai(a)aosc.io>
The patch titled
Subject: bcache: remove unnecessary select MIN_HEAP
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
bcache-remove-unnecessary-select-min_heap.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: Kuan-Wei Chiu <visitorckw(a)gmail.com>
Subject: bcache: remove unnecessary select MIN_HEAP
Date: Sun, 15 Jun 2025 04:23:53 +0800
After reverting the transition to the generic min heap library, bcache no
longer depends on MIN_HEAP. The select entry can be removed to reduce
code size and shrink the kernel's attack surface.
This change effectively reverts the bcache-related part of commit
92a8b224b833 ("lib/min_heap: introduce non-inline versions of min heap API
functions").
This is part of a series of changes to address a performance regression
caused by the use of the generic min_heap implementation.
As reported by Robert, bcache now suffers from latency spikes, with P100
(max) latency increasing from 600 ms to 2.4 seconds every 5 minutes.
These regressions degrade bcache's effectiveness as a low-latency cache
layer and lead to frequent timeouts and application stalls in production
environments.
Link: https://lore.kernel.org/lkml/CAJhEC05+0S69z+3+FB2Cd0hD+pCRyWTKLEOsc8BOmH73p…
Link: https://lkml.kernel.org/r/20250614202353.1632957-4-visitorckw@gmail.com
Fixes: 866898efbb25 ("bcache: remove heap-related macros and switch to generic min_heap")
Fixes: 92a8b224b833 ("lib/min_heap: introduce non-inline versions of min heap API functions")
Signed-off-by: Kuan-Wei Chiu <visitorckw(a)gmail.com>
Reported-by: Robert Pang <robertpang(a)google.com>
Closes: https://lore.kernel.org/linux-bcache/CAJhEC06F_AtrPgw2-7CvCqZgeStgCtitbD-ry…
Acked-by: Coly Li <colyli(a)kernel.org>
Cc: Ching-Chun (Jim) Huang <jserv(a)ccns.ncku.edu.tw>
Cc: Kent Overstreet <kent.overstreet(a)linux.dev>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
drivers/md/bcache/Kconfig | 1 -
1 file changed, 1 deletion(-)
--- a/drivers/md/bcache/Kconfig~bcache-remove-unnecessary-select-min_heap
+++ a/drivers/md/bcache/Kconfig
@@ -5,7 +5,6 @@ config BCACHE
select BLOCK_HOLDER_DEPRECATED if SYSFS
select CRC64
select CLOSURES
- select MIN_HEAP
help
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
_
Patches currently in -mm which might be from visitorckw(a)gmail.com are
revert-bcache-update-min_heap_callbacks-to-use-default-builtin-swap.patch
revert-bcache-remove-heap-related-macros-and-switch-to-generic-min_heap.patch
bcache-remove-unnecessary-select-min_heap.patch
lib-math-gcd-use-static-key-to-select-implementation-at-runtime.patch
riscv-optimize-gcd-code-size-when-config_riscv_isa_zbb-is-disabled.patch
riscv-optimize-gcd-performance-on-risc-v-without-zbb-extension.patch
The patch titled
Subject: Revert "bcache: update min_heap_callbacks to use default builtin swap"
has been added to the -mm mm-hotfixes-unstable branch. Its filename is
revert-bcache-update-min_heap_callbacks-to-use-default-builtin-swap.patch
This patch will shortly appear at
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patche…
This patch will later appear in the mm-hotfixes-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: Kuan-Wei Chiu <visitorckw(a)gmail.com>
Subject: Revert "bcache: update min_heap_callbacks to use default builtin swap"
Date: Sun, 15 Jun 2025 04:23:51 +0800
Patch series "bcache: Revert min_heap migration due to performance
regression".
This patch series reverts the migration of bcache from its original heap
implementation to the generic min_heap library. While the original change
aimed to simplify the code and improve maintainability, it introduced a
severe performance regression in real-world scenarios.
As reported by Robert, systems using bcache now suffer from periodic
latency spikes, with P100 (max) latency increasing from 600 ms to 2.4
seconds every 5 minutes. This degrades bcache's value as a low-latency
caching layer, and leads to frequent timeouts and application stalls in
production environments.
The primary cause of this regression is the behavior of the generic
min_heap implementation's bottom-up sift_down, which performs up to 2 *
log2(n) comparisons when many elements are equal. The original top-down
variant used by bcache only required O(1) comparisons in such cases. The
issue was further exacerbated by commit 92a8b224b833 ("lib/min_heap:
introduce non-inline versions of min heap API functions"), which
introduced non-inlined versions of the min_heap API, adding function call
overhead to a performance-critical hot path.
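To illustrate the comparison-count difference (a generic integer-heap
sketch, not the kernel's min_heap API):

/*
 * Top-down sift_down, as bcache's original heap used: when the parent is
 * already <= both children (e.g. many equal keys), it stops after the
 * first round of comparisons, so that common case costs O(1).
 */
static void sift_down_top_down(int *heap, int n, int i)
{
        for (;;) {
                int l = 2 * i + 1, r = l + 1, smallest = i, tmp;

                if (l < n && heap[l] < heap[smallest])
                        smallest = l;
                if (r < n && heap[r] < heap[smallest])
                        smallest = r;
                if (smallest == i)
                        return;         /* already a heap here: done */

                tmp = heap[i];
                heap[i] = heap[smallest];
                heap[smallest] = tmp;
                i = smallest;
        }
}

/*
 * The generic min_heap's bottom-up variant instead walks down to a leaf
 * choosing the smaller child at each level and then sifts the displaced
 * element back up, costing up to ~2 * log2(n) comparisons even when every
 * element compares equal.
 */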
This patch (of 3):
This reverts commit 3d8a9a1c35227c3f1b0bd132c9f0a80dbda07b65.
Although removing the custom swap function simplified the code, this
change is part of a broader migration to the generic min_heap API that
introduced significant performance regressions in bcache.
As reported by Robert, bcache now suffers from latency spikes, with P100
(max) latency increasing from 600 ms to 2.4 seconds every 5 minutes.
These regressions degrade bcache's effectiveness as a low-latency cache
layer and lead to frequent timeouts and application stalls in production
environments.
This revert is part of a series of changes to restore previous performance
by undoing the min_heap transition.
Link: https://lkml.kernel.org/r/20250614202353.1632957-1-visitorckw@gmail.com
Link: https://lore.kernel.org/lkml/CAJhEC05+0S69z+3+FB2Cd0hD+pCRyWTKLEOsc8BOmH73p…
Link: https://lkml.kernel.org/r/20250614202353.1632957-2-visitorckw@gmail.com
Fixes: 866898efbb25 ("bcache: remove heap-related macros and switch to generic min_heap")
Fixes: 92a8b224b833 ("lib/min_heap: introduce non-inline versions of min heap API functions")
Signed-off-by: Kuan-Wei Chiu <visitorckw(a)gmail.com>
Reported-by: Robert Pang <robertpang(a)google.com>
Closes: https://lore.kernel.org/linux-bcache/CAJhEC06F_AtrPgw2-7CvCqZgeStgCtitbD-ry…
Acked-by: Coly Li <colyli(a)kernel.org>
Cc: Ching-Chun (Jim) Huang <jserv(a)ccns.ncku.edu.tw>
Cc: Kent Overstreet <kent.overstreet(a)linux.dev>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
drivers/md/bcache/alloc.c | 11 +++++++++--
drivers/md/bcache/bset.c | 14 +++++++++++---
drivers/md/bcache/extents.c | 10 +++++++++-
drivers/md/bcache/movinggc.c | 10 +++++++++-
4 files changed, 38 insertions(+), 7 deletions(-)
--- a/drivers/md/bcache/alloc.c~revert-bcache-update-min_heap_callbacks-to-use-default-builtin-swap
+++ a/drivers/md/bcache/alloc.c
@@ -189,16 +189,23 @@ static inline bool new_bucket_min_cmp(co
return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs);
}
+static inline void new_bucket_swap(void *l, void *r, void __always_unused *args)
+{
+ struct bucket **lhs = l, **rhs = r;
+
+ swap(*lhs, *rhs);
+}
+
static void invalidate_buckets_lru(struct cache *ca)
{
struct bucket *b;
const struct min_heap_callbacks bucket_max_cmp_callback = {
.less = new_bucket_max_cmp,
- .swp = NULL,
+ .swp = new_bucket_swap,
};
const struct min_heap_callbacks bucket_min_cmp_callback = {
.less = new_bucket_min_cmp,
- .swp = NULL,
+ .swp = new_bucket_swap,
};
ca->heap.nr = 0;
--- a/drivers/md/bcache/bset.c~revert-bcache-update-min_heap_callbacks-to-use-default-builtin-swap
+++ a/drivers/md/bcache/bset.c
@@ -1093,6 +1093,14 @@ static inline bool new_btree_iter_cmp(co
return bkey_cmp(_l->k, _r->k) <= 0;
}
+static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
+{
+ struct btree_iter_set *_iter1 = iter1;
+ struct btree_iter_set *_iter2 = iter2;
+
+ swap(*_iter1, *_iter2);
+}
+
static inline bool btree_iter_end(struct btree_iter *iter)
{
return !iter->heap.nr;
@@ -1103,7 +1111,7 @@ void bch_btree_iter_push(struct btree_it
{
const struct min_heap_callbacks callbacks = {
.less = new_btree_iter_cmp,
- .swp = NULL,
+ .swp = new_btree_iter_swap,
};
if (k != end)
@@ -1149,7 +1157,7 @@ static inline struct bkey *__bch_btree_i
struct bkey *ret = NULL;
const struct min_heap_callbacks callbacks = {
.less = cmp,
- .swp = NULL,
+ .swp = new_btree_iter_swap,
};
if (!btree_iter_end(iter)) {
@@ -1223,7 +1231,7 @@ static void btree_mergesort(struct btree
: bch_ptr_invalid;
const struct min_heap_callbacks callbacks = {
.less = b->ops->sort_cmp,
- .swp = NULL,
+ .swp = new_btree_iter_swap,
};
/* Heapify the iterator, using our comparison function */
--- a/drivers/md/bcache/extents.c~revert-bcache-update-min_heap_callbacks-to-use-default-builtin-swap
+++ a/drivers/md/bcache/extents.c
@@ -266,12 +266,20 @@ static bool new_bch_extent_sort_cmp(cons
return !(c ? c > 0 : _l->k < _r->k);
}
+static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
+{
+ struct btree_iter_set *_iter1 = iter1;
+ struct btree_iter_set *_iter2 = iter2;
+
+ swap(*_iter1, *_iter2);
+}
+
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
struct bkey *tmp)
{
const struct min_heap_callbacks callbacks = {
.less = new_bch_extent_sort_cmp,
- .swp = NULL,
+ .swp = new_btree_iter_swap,
};
while (iter->heap.nr > 1) {
struct btree_iter_set *top = iter->heap.data, *i = top + 1;
--- a/drivers/md/bcache/movinggc.c~revert-bcache-update-min_heap_callbacks-to-use-default-builtin-swap
+++ a/drivers/md/bcache/movinggc.c
@@ -190,6 +190,14 @@ static bool new_bucket_cmp(const void *l
return GC_SECTORS_USED(*_l) >= GC_SECTORS_USED(*_r);
}
+static void new_bucket_swap(void *l, void *r, void __always_unused *args)
+{
+ struct bucket **_l = l;
+ struct bucket **_r = r;
+
+ swap(*_l, *_r);
+}
+
static unsigned int bucket_heap_top(struct cache *ca)
{
struct bucket *b;
@@ -204,7 +212,7 @@ void bch_moving_gc(struct cache_set *c)
unsigned long sectors_to_move, reserve_sectors;
const struct min_heap_callbacks callbacks = {
.less = new_bucket_cmp,
- .swp = NULL,
+ .swp = new_bucket_swap,
};
if (!c->copy_gc_enabled)
_
Patches currently in -mm which might be from visitorckw(a)gmail.com are
revert-bcache-update-min_heap_callbacks-to-use-default-builtin-swap.patch
revert-bcache-remove-heap-related-macros-and-switch-to-generic-min_heap.patch
bcache-remove-unnecessary-select-min_heap.patch
lib-math-gcd-use-static-key-to-select-implementation-at-runtime.patch
riscv-optimize-gcd-code-size-when-config_riscv_isa_zbb-is-disabled.patch
riscv-optimize-gcd-performance-on-risc-v-without-zbb-extension.patch
Some libcs, like musl libc, don't provide execinfo.h since it's not part
of POSIX. In order to fix compilation on musl, only include execinfo.h if
it is available (HAVE_BACKTRACE_SUPPORT).
This was discovered with c104c16073b7 ("Kunit to check the longest symbol
length"), which started to include linux/kallsyms.h with Alpine Linux's
configs.
Signed-off-by: Achill Gilgenast <fossdd(a)pwned.life>
Cc: stable(a)vger.kernel.org
---
tools/include/linux/kallsyms.h | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h
index 5a37ccbec54f..f61a01dd7eb7 100644
--- a/tools/include/linux/kallsyms.h
+++ b/tools/include/linux/kallsyms.h
@@ -18,6 +18,7 @@ static inline const char *kallsyms_lookup(unsigned long addr,
return NULL;
}
+#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#include <stdlib.h>
static inline void print_ip_sym(const char *loglvl, unsigned long ip)
@@ -30,5 +31,8 @@ static inline void print_ip_sym(const char *loglvl, unsigned long ip)
free(name);
}
+#else
+static inline void print_ip_sym(const char *loglvl, unsigned long ip) {}
+#endif
#endif
--
2.50.0.rc2
Commit 10685681bafc ("net_sched: sch_sfq: don't allow 1 packet limit")
fixes CVE-2024-57996, and commit b3bf8f63e617 ("net_sched: sch_sfq: move
the limit validation") fixes CVE-2025-37752.
Patches 3 and 5 are the fixes for the above-mentioned CVEs; patches 1, 2
and 4 are pulled in as stable dependencies.
Testing was performed on the patched 5.10.238 kernel with the above 5
patches, using the latest upstream kselftests for tc-testing:
# uname -a
Linux hamogala-vm-6 5.10.238+ #2 SMP Sun Jun 15 17:27:54 GMT 2025 x86_64 x86_64 x86_64 GNU/Linux
# ./tdc.py -f tc-tests/qdiscs/sfq.json
-- ns/SubPlugin.__init__
Test 7482: Create SFQ with default setting
Test c186: Create SFQ with limit setting
Test ae23: Create SFQ with perturb setting
Test a430: Create SFQ with quantum setting
Test 4539: Create SFQ with divisor setting
Test b089: Create SFQ with flows setting
Test 99a0: Create SFQ with depth setting
Test 7389: Create SFQ with headdrop setting
Test 6472: Create SFQ with redflowlimit setting
Test 8929: Show SFQ class
Test 4d6f: Check that limit of 1 is rejected
Test 7f8f: Check that a derived limit of 1 is rejected (limit 2 depth 1 flows 1)
Test 5168: Check that a derived limit of 1 is rejected (limit 2 depth 1 divisor 1)
All test results:
1..13
ok 1 7482 - Create SFQ with default setting
ok 2 c186 - Create SFQ with limit setting
ok 3 ae23 - Create SFQ with perturb setting
ok 4 a430 - Create SFQ with quantum setting
ok 5 4539 - Create SFQ with divisor setting
ok 6 b089 - Create SFQ with flows setting
ok 7 99a0 - Create SFQ with depth setting
ok 8 7389 - Create SFQ with headdrop setting
ok 9 6472 - Create SFQ with redflowlimit setting
ok 10 8929 - Show SFQ class
ok 11 4d6f - Check that limit of 1 is rejected
ok 12 7f8f - Check that a derived limit of 1 is rejected (limit 2 depth 1 flows 1)
ok 13 5168 - Check that a derived limit of 1 is rejected (limit 2 depth 1 divisor 1)
Thanks,
Harshit
Eric Dumazet (2):
net_sched: sch_sfq: annotate data-races around q->perturb_period
net_sched: sch_sfq: handle bigger packets
Octavian Purdila (3):
net_sched: sch_sfq: don't allow 1 packet limit
net_sched: sch_sfq: use a temporary work area for validating
configuration
net_sched: sch_sfq: move the limit validation
net/sched/sch_sfq.c | 112 ++++++++++++++++++++++++++++----------------
1 file changed, 71 insertions(+), 41 deletions(-)
--
2.47.1
When transitioning from USB_ROLE_DEVICE to USB_ROLE_NONE, the code
assumed that the regulator should be disabled. However, if the regulator
is marked as always-on, regulator_is_enabled() continues to return true,
leading to an unbalanced regulator_disable() call on a supply the driver
never enabled.
This can result in warnings such as:
[ 250.155624] WARNING: CPU: 1 PID: 7326 at drivers/regulator/core.c:3004
_regulator_disable+0xe4/0x1a0
[ 250.155652] unbalanced disables for VIN_SYS_5V0
To fix this, move the regulator control logic into the
tegra186_xusb_padctl_id_override() function, since it is directly related
to the ID override state. The regulator is now only disabled when the role
transitions from USB_ROLE_HOST to USB_ROLE_NONE, by checking the VBUS_ID
register. This ensures that regulator enable/disable operations are
properly balanced and only occur when actually transitioning to/from host
mode.
Fixes: 49d46e3c7e59 ("phy: tegra: xusb: Add set_mode support for UTMI phy on Tegra186")
Cc: stable(a)vger.kernel.org
Signed-off-by: Wayne Chang <waynec(a)nvidia.com>
---
drivers/phy/tegra/xusb-tegra186.c | 59 +++++++++++++++++++------------
1 file changed, 37 insertions(+), 22 deletions(-)
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index fae6242aa730..1b35d50821f7 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -774,13 +774,15 @@ static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
}
static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
- bool status)
+ struct tegra_xusb_usb2_port *port, bool status)
{
- u32 value;
+ u32 value, id_override;
+ int err = 0;
dev_dbg(padctl->dev, "%s id override\n", status ? "set" : "clear");
value = padctl_readl(padctl, USB2_VBUS_ID);
+ id_override = value & ID_OVERRIDE(~0);
if (status) {
if (value & VBUS_OVERRIDE) {
@@ -791,15 +793,35 @@ static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
value = padctl_readl(padctl, USB2_VBUS_ID);
}
- value &= ~ID_OVERRIDE(~0);
- value |= ID_OVERRIDE_GROUNDED;
+ if (id_override != ID_OVERRIDE_GROUNDED) {
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_GROUNDED;
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+
+ err = regulator_enable(port->supply);
+ if (err) {
+ dev_err(padctl->dev, "Failed to enable regulator: %d\n", err);
+ return err;
+ }
+ }
} else {
- value &= ~ID_OVERRIDE(~0);
- value |= ID_OVERRIDE_FLOATING;
+ if (id_override == ID_OVERRIDE_GROUNDED) {
+ /*
+ * The regulator is disabled only when the role transitions
+ * from USB_ROLE_HOST to USB_ROLE_NONE.
+ */
+ err = regulator_disable(port->supply);
+ if (err) {
+ dev_err(padctl->dev, "Failed to disable regulator: %d\n", err);
+ return err;
+ }
+
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_FLOATING;
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+ }
}
- padctl_writel(padctl, value, USB2_VBUS_ID);
-
return 0;
}
@@ -818,27 +840,20 @@ static int tegra186_utmi_phy_set_mode(struct phy *phy, enum phy_mode mode,
if (mode == PHY_MODE_USB_OTG) {
if (submode == USB_ROLE_HOST) {
- tegra186_xusb_padctl_id_override(padctl, true);
-
- err = regulator_enable(port->supply);
+ err = tegra186_xusb_padctl_id_override(padctl, port, true);
+ if (err)
+ goto out;
} else if (submode == USB_ROLE_DEVICE) {
tegra186_xusb_padctl_vbus_override(padctl, true);
} else if (submode == USB_ROLE_NONE) {
- /*
- * When port is peripheral only or role transitions to
- * USB_ROLE_NONE from USB_ROLE_DEVICE, regulator is not
- * enabled.
- */
- if (regulator_is_enabled(port->supply))
- regulator_disable(port->supply);
-
- tegra186_xusb_padctl_id_override(padctl, false);
+ err = tegra186_xusb_padctl_id_override(padctl, port, false);
+ if (err)
+ goto out;
tegra186_xusb_padctl_vbus_override(padctl, false);
}
}
-
+out:
mutex_unlock(&padctl->lock);
-
return err;
}
--
2.25.1