The Cadence PCIe Controller integrated in the TI K3 SoCs supports both
Root-Complex and Endpoint modes of operation. The Glue Layer allows
"strapping" the mode of operation of the Controller, the Link Speed
and the Link Width. This is enabled by programming the "PCIEn_CTRL"
register (n corresponds to the PCIe instance) within the CTRL_MMR
memory-mapped register space.
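For reference, programming these straps is a plain syscon/regmap update
of the "PCIEn_CTRL" register. Below is a minimal sketch of the mode
strap only; treat the exact bit position as an assumption (the
authoritative field definitions are in pci-j721e.c and the TRM):

  #include <linux/regmap.h>

  /* Sketch only: set the RC/EP mode strap in PCIEn_CTRL through the
   * CTRL_MMR syscon. J721E_MODE_RC is assumed to be the mode bit
   * (BIT(7) in pci-j721e.c); RC = 1, EP = 0. */
  static int set_mode_strap(struct regmap *syscon, unsigned int offset,
                            bool rc_mode)
  {
          u32 mask = J721E_MODE_RC;
          u32 val = rc_mode ? J721E_MODE_RC : 0;

          return regmap_update_bits(syscon, offset, mask, val);
  }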
In the PCIe Controller's register space, the same set of registers
that correspond to the Root-Port configuration space when the
Controller is configured for Root-Complex mode of operation, also
correspond to the Physical Function configuration space when the
Controller is configured for Endpoint mode of operation. As a result,
the "reset-value" of these set of registers _should_ vary depending
on the selected mode of operation. This is the expected behavior
according to the description of the registers and their reset values
in the Technical Reference Manual for the SoCs.
However, the "reset-value" observed in practice does not match this
description. To be precise, when the Controller is configured for
Root-Complex mode of operation, the "reset-value" of the Root-Port
configuration space reflects the "reset-value" corresponding to the
Physical Function configuration space.
This can be attributed to the fact that the "strap" settings play
a role in "switching" the "reset-value" of the registers to match
the expected values as determined by the selected mode of operation.
Since the "strap" settings are sampled the moment the PCIe Controller
is powered ON, the "reset-value" of the registers are setup at that
point in time. As a result, if the "strap" settings are programmed
at a later point in time, it _will not_ update the "reset-value" of
the registers. This will cause the Physical Function configuration
space to be seen when the Root-Port configuration space is accessed
after programming the PCIe Controller for Root-Complex mode of
operation.
Fix this by powering off the PCIe Controller before programming the
"strap" settings and powering it on after that. This will ensure
that the "strap" settings that have been sampled convey the intended
mode of operation, thereby resulting in the "reset-value" of the
registers being accurate.
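Condensed, the probe path therefore has to follow the ordering below
(a sketch of the sequence introduced by the diff further down, with
error handling trimmed):

  dev_pm_domain_detach(dev, true);        /* power off the Controller     */
  ret = j721e_pcie_ctrl_init(pcie, dev);  /* program the "strap" settings */
  if (ret < 0)
          return ret;
  ret = dev_pm_domain_attach(dev, true);  /* power on: straps get sampled */
  if (ret < 0)
          return ret;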
Fixes: f3e25911a430 ("PCI: j721e: Add TI J721E PCIe driver")
Cc: <stable@vger.kernel.org>
Signed-off-by: Siddharth Vadapalli <s-vadapalli@ti.com>
---
Hello,
This patch is based on commit
155a3c003e55 Merge tag 'for-6.16/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
of Mainline Linux.
Patch has been validated on the J7200 SoC which is affected by
the existing programming sequence of the "strap" settings.
Regards,
Siddharth.
drivers/pci/controller/cadence/pci-j721e.c | 82 ++++++++++++++--------
1 file changed, 53 insertions(+), 29 deletions(-)
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 6c93f39d0288..d5e7cb7277dc 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -173,10 +174,9 @@ static const struct cdns_pcie_ops j721e_pcie_ops = {
.link_up = j721e_pcie_link_up,
};
-static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon,
- unsigned int offset)
+static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct device *dev,
+ struct regmap *syscon, unsigned int offset)
{
- struct device *dev = pcie->cdns_pcie->dev;
u32 mask = J721E_MODE_RC;
u32 mode = pcie->mode;
u32 val = 0;
@@ -193,9 +193,9 @@ static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon,
}
static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
+ struct device *dev,
struct regmap *syscon, unsigned int offset)
{
- struct device *dev = pcie->cdns_pcie->dev;
struct device_node *np = dev->of_node;
int link_speed;
u32 val = 0;
@@ -214,9 +214,9 @@ static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
}
static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
+ struct device *dev,
struct regmap *syscon, unsigned int offset)
{
- struct device *dev = pcie->cdns_pcie->dev;
u32 lanes = pcie->num_lanes;
u32 mask = BIT(8);
u32 val = 0;
@@ -234,9 +234,9 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
}
static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
+ struct device *dev,
struct regmap *syscon)
{
- struct device *dev = pcie->cdns_pcie->dev;
struct device_node *node = dev->of_node;
u32 mask = ACSPCIE_PAD_DISABLE_MASK;
struct of_phandle_args args;
@@ -263,9 +263,8 @@ static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
return 0;
}
-static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
+static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie, struct device *dev)
{
- struct device *dev = pcie->cdns_pcie->dev;
struct device_node *node = dev->of_node;
struct of_phandle_args args;
unsigned int offset = 0;
@@ -284,19 +283,19 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
if (!ret)
offset = args.args[0];
- ret = j721e_pcie_set_mode(pcie, syscon, offset);
+ ret = j721e_pcie_set_mode(pcie, dev, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set pci mode\n");
return ret;
}
- ret = j721e_pcie_set_link_speed(pcie, syscon, offset);
+ ret = j721e_pcie_set_link_speed(pcie, dev, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set link speed\n");
return ret;
}
- ret = j721e_pcie_set_lane_count(pcie, syscon, offset);
+ ret = j721e_pcie_set_lane_count(pcie, dev, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set num-lanes\n");
return ret;
@@ -308,7 +307,7 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
if (!syscon)
return 0;
- return j721e_enable_acspcie_refclk(pcie, syscon);
+ return j721e_enable_acspcie_refclk(pcie, dev, syscon);
}
static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -469,6 +468,47 @@ static int j721e_pcie_probe(struct platform_device *pdev)
if (!pcie)
return -ENOMEM;
+ pcie->mode = mode;
+
+ ret = of_property_read_u32(node, "num-lanes", &num_lanes);
+ if (ret || num_lanes > data->max_lanes) {
+ dev_warn(dev, "num-lanes property not provided or invalid, setting num-lanes to 1\n");
+ num_lanes = 1;
+ }
+
+ pcie->num_lanes = num_lanes;
+ pcie->max_lanes = data->max_lanes;
+
+ /*
+ * The PCIe Controller's registers have different "reset-value" depending
+ * on the "strap" settings programmed into the Controller's Glue Layer.
+ * This is because the same set of registers are used for representing the
+ * Physical Function configuration space in Endpoint mode and for
+ * representing the Root-Port configuration space in Root-Complex mode.
+ *
+ * The registers latch onto a "reset-value" based on the "strap" settings
+ * sampled after the Controller is powered on. Therefore, for the
+ * "reset-value" to be accurate, it is necessary to program the "strap"
+ * settings when the Controller is powered off, and power on the Controller
+ * after the "strap" settings have been programmed.
+ *
+ * The "strap" settings are programmed by "j721e_pcie_ctrl_init()".
+ * Therefore, power off the Controller before invoking "j721e_pcie_ctrl_init()",
+ * program the "strap" settings, and then power on the Controller. This ensures
+ * that the reset values are accurate and reflect the "strap" settings.
+ */
+ dev_pm_domain_detach(dev, true);
+
+ ret = j721e_pcie_ctrl_init(pcie, dev);
+ if (ret < 0)
+ return ret;
+
+ ret = dev_pm_domain_attach(dev, true);
+ if (ret < 0) {
+ dev_err(dev, "failed to power on PCIe Controller\n");
+ return ret;
+ }
+
switch (mode) {
case PCI_MODE_RC:
if (!IS_ENABLED(CONFIG_PCI_J721E_HOST))
@@ -510,7 +550,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
return 0;
}
- pcie->mode = mode;
pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
@@ -523,15 +562,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
return PTR_ERR(base);
pcie->user_cfg_base = base;
- ret = of_property_read_u32(node, "num-lanes", &num_lanes);
- if (ret || num_lanes > data->max_lanes) {
- dev_warn(dev, "num-lanes property not provided or invalid, setting num-lanes to 1\n");
- num_lanes = 1;
- }
-
- pcie->num_lanes = num_lanes;
- pcie->max_lanes = data->max_lanes;
-
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
return -EINVAL;
@@ -547,12 +577,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- ret = j721e_pcie_ctrl_init(pcie);
- if (ret < 0) {
- dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
- goto err_get_sync;
- }
-
ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
"j721e-pcie-link-down-irq", pcie);
if (ret < 0) {
@@ -680,7 +704,7 @@ static int j721e_pcie_resume_noirq(struct device *dev)
struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
int ret;
- ret = j721e_pcie_ctrl_init(pcie);
+ ret = j721e_pcie_ctrl_init(pcie, dev);
if (ret < 0)
return ret;
--
2.34.1
A new warning in clang [1] points out that id_reg is left uninitialized
and then passed to memstick_init_req() as a const pointer:
drivers/memstick/core/memstick.c:330:59: error: variable 'id_reg' is uninitialized when passed as a const pointer argument here [-Werror,-Wuninitialized-const-pointer]
330 | memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
| ^~~~~~
Commit de182cc8e882 ("drivers/memstick/core/memstick.c: avoid -Wnonnull
warning") intentionally passed this variable uninitialized to avoid a
-Wnonnull warning from the NULL value that was previously there, since
id_reg is never read by the call to memstick_init_req() in
h_memstick_read_dev_id(). Just zero initialize id_reg to avoid the
warning; the zeroing likely already happens in the majority of builds
using modern compilers that support '-ftrivial-auto-var-init=zero'.
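For illustration, the warning fires on any code of this shape; a
minimal standalone reduction (hypothetical names, not the memstick
API), followed by the equivalent of the fix below:

  void consume(const int *p);     /* callee cannot initialize *p */

  void example(void)
  {
          int buf;                /* never written */
          consume(&buf);          /* -Wuninitialized-const-pointer */
  }

  void example_fixed(void)
  {
          int buf = 0;            /* zero initializer silences it */
          consume(&buf);
  }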
Cc: stable@vger.kernel.org
Fixes: de182cc8e882 ("drivers/memstick/core/memstick.c: avoid -Wnonnull warning")
Link: https://github.com/llvm/llvm-project/commit/00dacf8c22f065cb52efb14cd091d44… [1]
Closes: https://github.com/ClangBuiltLinux/linux/issues/2105
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
---
drivers/memstick/core/memstick.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 043b9ec756ff..7f3f47db4c98 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -324,7 +324,7 @@ EXPORT_SYMBOL(memstick_init_req);
static int h_memstick_read_dev_id(struct memstick_dev *card,
struct memstick_request **mrq)
{
- struct ms_id_register id_reg;
+ struct ms_id_register id_reg = {};
if (!(*mrq)) {
memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
---
base-commit: ff09b71bf9daeca4f21d6e5e449641c9fad75b53
change-id: 20250715-memstick-fix-uninit-const-pointer-ed6f138bf40d
Best regards,
--
Nathan Chancellor <nathan@kernel.org>
The ready event list of an epoll object is protected by a read-write
lock (rwlock):
- The consumer (waiter) acquires the write lock and takes items.
- The producer (waker) takes the read lock and adds items.
The point of this design is to let epoll scale well with a large number
of producers, as multiple producers can hold the read lock at the same
time.
Unfortunately, this implementation may cause a scheduling priority
inversion problem. Suppose the consumer has higher scheduling priority
than the producer. The consumer needs to acquire the write lock, but
may be blocked by the producer holding the read lock. Since the
read-write lock does not support priority boosting for the readers
(even with CONFIG_PREEMPT_RT=y), we have a case of priority inversion:
a higher-priority consumer is blocked by a lower-priority producer.
This problem was reported in [1].
Furthermore, this could also cause a stall problem, as described in [2].
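Schematically, the inversion with the current rwlock-based design looks
like this (a sketch of the two paths, not a verbatim quote of the code):

  /*
   * Low-priority producer, ep_poll_callback():
   *         read_lock_irqsave(&ep->lock, flags);
   *         ... add the event to ep->rdllist or ep->ovflist ...
   *         read_unlock_irqrestore(&ep->lock, flags);
   *
   * High-priority consumer, ep_start_scan()/ep_poll():
   *         write_lock_irq(&ep->lock);   <-- blocks behind the readers,
   *                                          with no priority boosting
   *         ... splice ep->rdllist ...
   *         write_unlock_irq(&ep->lock);
   */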
Fix this problem by replacing rwlock with spinlock.
This reduces the event bandwidth, as the producers now have to contend with
each other for the spinlock. According to the benchmark from
https://github.com/rouming/test-tools/blob/master/stress-epoll.c:
On 12 x86 CPUs:
             Before      After
  threads    events/ms   events/ms   Diff
        8    7162        4956        -31%
       16    8733        5383        -38%
       32    7968        5572        -30%
       64    10652       5739        -46%
      128    11236       5931        -47%
On 4 riscv CPUs:
             Before      After
  threads    events/ms   events/ms   Diff
        8    2958        2833         -4%
       16    3323        3097         -7%
       32    3451        3240         -6%
       64    3554        3178        -11%
      128    3601        3235        -10%
Although the numbers look bad, it should be noted that this benchmark
creates multiple threads that do nothing except constantly generate new
epoll events, so contention on the spinlock is high. For real
workloads, the event rate is likely much lower, and the performance
drop is not as bad.
Using another benchmark (perf bench epoll wait) where spinlock contention
is lower, improvement is even observed on x86:
On 12 x86 CPUs:
Before: Averaged 110279 operations/sec (+- 1.09%), total secs = 8
After: Averaged 114577 operations/sec (+- 2.25%), total secs = 8
On 4 riscv CPUs:
Before: Averaged 175767 operations/sec (+- 0.62%), total secs = 8
After: Averaged 167396 operations/sec (+- 0.23%), total secs = 8
In conclusion, no one is likely to be upset over this change. After all,
a spinlock was used originally for years, and the commit which converted
to the rwlock didn't mention a real workload, just that the benchmark
numbers were nice.
This patch is not exactly a revert of commit a218cc491420 ("epoll: use
rwlock in order to reduce ep_poll_callback() contention"), because a
git revert conflicts in some places whose resolution is not obvious.
Since this patch is intended to be backported, go with the obvious
approach:
- Replace rwlock_t with spinlock_t one to one
- Delete list_add_tail_lockless() and chain_epi_lockless(). These were
  introduced to allow producers to concurrently add items to the list.
  But now that the spinlock no longer allows producers to touch the
  event list concurrently, these two functions are no longer necessary.
Fixes: a218cc491420 ("epoll: use rwlock in order to reduce ep_poll_callback() contention")
Signed-off-by: Nam Cao <namcao@linutronix.de>
Cc: stable@vger.kernel.org
---
fs/eventpoll.c | 139 +++++++++----------------------------------------
1 file changed, 26 insertions(+), 113 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 0fbf5dfedb24..a171f7e7dacc 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -46,10 +46,10 @@
*
* 1) epnested_mutex (mutex)
* 2) ep->mtx (mutex)
- * 3) ep->lock (rwlock)
+ * 3) ep->lock (spinlock)
*
* The acquire order is the one listed above, from 1 to 3.
- * We need a rwlock (ep->lock) because we manipulate objects
+ * We need a spinlock (ep->lock) because we manipulate objects
* from inside the poll callback, that might be triggered from
* a wake_up() that in turn might be called from IRQ context.
* So we can't sleep inside the poll callback and hence we need
@@ -195,7 +195,7 @@ struct eventpoll {
struct list_head rdllist;
/* Lock which protects rdllist and ovflist */
- rwlock_t lock;
+ spinlock_t lock;
/* RB tree root used to store monitored fd structs */
struct rb_root_cached rbr;
@@ -740,10 +740,10 @@ static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
* in a lockless way.
*/
lockdep_assert_irqs_enabled();
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
list_splice_init(&ep->rdllist, txlist);
WRITE_ONCE(ep->ovflist, NULL);
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
}
static void ep_done_scan(struct eventpoll *ep,
@@ -751,7 +751,7 @@ static void ep_done_scan(struct eventpoll *ep,
{
struct epitem *epi, *nepi;
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
/*
* During the time we spent inside the "sproc" callback, some
* other events might have been queued by the poll callback.
@@ -792,7 +792,7 @@ static void ep_done_scan(struct eventpoll *ep,
wake_up(&ep->wq);
}
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
}
static void ep_get(struct eventpoll *ep)
@@ -867,10 +867,10 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
rb_erase_cached(&epi->rbn, &ep->rbr);
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
if (ep_is_linked(epi))
list_del_init(&epi->rdllink);
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
wakeup_source_unregister(ep_wakeup_source(epi));
/*
@@ -1151,7 +1151,7 @@ static int ep_alloc(struct eventpoll **pep)
return -ENOMEM;
mutex_init(&ep->mtx);
- rwlock_init(&ep->lock);
+ spin_lock_init(&ep->lock);
init_waitqueue_head(&ep->wq);
init_waitqueue_head(&ep->poll_wait);
INIT_LIST_HEAD(&ep->rdllist);
@@ -1238,100 +1238,10 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
}
#endif /* CONFIG_KCMP */
-/*
- * Adds a new entry to the tail of the list in a lockless way, i.e.
- * multiple CPUs are allowed to call this function concurrently.
- *
- * Beware: it is necessary to prevent any other modifications of the
- * existing list until all changes are completed, in other words
- * concurrent list_add_tail_lockless() calls should be protected
- * with a read lock, where write lock acts as a barrier which
- * makes sure all list_add_tail_lockless() calls are fully
- * completed.
- *
- * Also an element can be locklessly added to the list only in one
- * direction i.e. either to the tail or to the head, otherwise
- * concurrent access will corrupt the list.
- *
- * Return: %false if element has been already added to the list, %true
- * otherwise.
- */
-static inline bool list_add_tail_lockless(struct list_head *new,
- struct list_head *head)
-{
- struct list_head *prev;
-
- /*
- * This is simple 'new->next = head' operation, but cmpxchg()
- * is used in order to detect that same element has been just
- * added to the list from another CPU: the winner observes
- * new->next == new.
- */
- if (!try_cmpxchg(&new->next, &new, head))
- return false;
-
- /*
- * Initially ->next of a new element must be updated with the head
- * (we are inserting to the tail) and only then pointers are atomically
- * exchanged. XCHG guarantees memory ordering, thus ->next should be
- * updated before pointers are actually swapped and pointers are
- * swapped before prev->next is updated.
- */
-
- prev = xchg(&head->prev, new);
-
- /*
- * It is safe to modify prev->next and new->prev, because a new element
- * is added only to the tail and new->next is updated before XCHG.
- */
-
- prev->next = new;
- new->prev = prev;
-
- return true;
-}
-
-/*
- * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
- * i.e. multiple CPUs are allowed to call this function concurrently.
- *
- * Return: %false if epi element has been already chained, %true otherwise.
- */
-static inline bool chain_epi_lockless(struct epitem *epi)
-{
- struct eventpoll *ep = epi->ep;
-
- /* Fast preliminary check */
- if (epi->next != EP_UNACTIVE_PTR)
- return false;
-
- /* Check that the same epi has not been just chained from another CPU */
- if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
- return false;
-
- /* Atomically exchange tail */
- epi->next = xchg(&ep->ovflist, epi);
-
- return true;
-}
-
/*
* This is the callback that is passed to the wait queue wakeup
* mechanism. It is called by the stored file descriptors when they
* have events to report.
- *
- * This callback takes a read lock in order not to contend with concurrent
- * events from another file descriptor, thus all modifications to ->rdllist
- * or ->ovflist are lockless. Read lock is paired with the write lock from
- * ep_start/done_scan(), which stops all list modifications and guarantees
- * that lists state is seen correctly.
- *
- * Another thing worth to mention is that ep_poll_callback() can be called
- * concurrently for the same @epi from different CPUs if poll table was inited
- * with several wait queues entries. Plural wakeup from different CPUs of a
- * single wait queue is serialized by wq.lock, but the case when multiple wait
- * queues are used should be detected accordingly. This is detected using
- * cmpxchg() operation.
*/
static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
@@ -1342,7 +1252,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
unsigned long flags;
int ewake = 0;
- read_lock_irqsave(&ep->lock, flags);
+ spin_lock_irqsave(&ep->lock, flags);
ep_set_busy_poll_napi_id(epi);
@@ -1371,12 +1281,15 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
* chained in ep->ovflist and requeued later on.
*/
if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
- if (chain_epi_lockless(epi))
+ if (epi->next == EP_UNACTIVE_PTR) {
+ epi->next = READ_ONCE(ep->ovflist);
+ WRITE_ONCE(ep->ovflist, epi);
ep_pm_stay_awake_rcu(epi);
+ }
} else if (!ep_is_linked(epi)) {
/* In the usual case, add event to ready list. */
- if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
- ep_pm_stay_awake_rcu(epi);
+ list_add_tail(&epi->rdllink, &ep->rdllist);
+ ep_pm_stay_awake_rcu(epi);
}
/*
@@ -1409,7 +1322,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
pwake++;
out_unlock:
- read_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irqrestore(&ep->lock, flags);
/* We have to call this outside the lock */
if (pwake)
@@ -1744,7 +1657,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
}
/* We have to drop the new item inside our item list to keep track of it */
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
/* record NAPI ID of new item if present */
ep_set_busy_poll_napi_id(epi);
@@ -1761,7 +1674,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
pwake++;
}
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
/* We have to call this outside the lock */
if (pwake)
@@ -1825,7 +1738,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
* list, push it inside.
*/
if (ep_item_poll(epi, &pt, 1)) {
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
if (!ep_is_linked(epi)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
@@ -1836,7 +1749,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
}
/* We have to call this outside the lock */
@@ -2088,7 +2001,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
init_wait(&wait);
wait.func = ep_autoremove_wake_function;
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
/*
* Barrierless variant, waitqueue_active() is called under
* the same lock on wakeup ep_poll_callback() side, so it
@@ -2107,7 +2020,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
if (!eavail)
__add_wait_queue_exclusive(&ep->wq, &wait);
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
if (!eavail)
timed_out = !ep_schedule_timeout(to) ||
@@ -2123,7 +2036,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
eavail = 1;
if (!list_empty_careful(&wait.entry)) {
- write_lock_irq(&ep->lock);
+ spin_lock_irq(&ep->lock);
/*
* If the thread timed out and is not on the wait queue,
* it means that the thread was woken up after its
@@ -2134,7 +2047,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
if (timed_out)
eavail = list_empty(&wait.entry);
__remove_wait_queue(&ep->wq, &wait);
- write_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->lock);
}
}
}
--
2.39.5
Fix the order of the freq-table-hz property, then convert to OPP tables
and add interconnect support for UFS on the SM6350 SoC.
Signed-off-by: Luca Weiss <luca.weiss@fairphone.com>
---
Luca Weiss (3):
arm64: dts: qcom: sm6350: Fix wrong order of freq-table-hz for UFS
arm64: dts: qcom: sm6350: Add OPP table support to UFSHC
arm64: dts: qcom: sm6350: Add interconnect support to UFS
arch/arm64/boot/dts/qcom/sm6350.dtsi | 49 ++++++++++++++++++++++++++++--------
1 file changed, 39 insertions(+), 10 deletions(-)
---
base-commit: eea255893718268e1ab852fb52f70c613d109b99
change-id: 20250314-sm6350-ufs-things-53c5de9fec5e
Best regards,
--
Luca Weiss <luca.weiss@fairphone.com>
After a recent change in clang to strengthen its uninitialized warnings
[1], clang points out that params is used uninitialized in one of the
error paths in parse_btf_arg():
kernel/trace/trace_probe.c:660:19: warning: variable 'params' is uninitialized when used here [-Wuninitialized]
660 | return PTR_ERR(params);
| ^~~~~~
Match many other NO_BTF_ENTRY error cases and return -ENOENT, clearing
up the warning.
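For background, PTR_ERR() does not validate anything; it is roughly the
cast below (from include/linux/err.h), so calling it on a local that was
never assigned returns stack garbage rather than a negative errno,
whereas -ENOENT is the value the other NO_BTF_ENTRY paths already use:

  /* Roughly the definition in include/linux/err.h. */
  static inline long PTR_ERR(__force const void *ptr)
  {
          return (long) ptr;
  }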
Cc: stable@vger.kernel.org
Closes: https://github.com/ClangBuiltLinux/linux/issues/2110
Fixes: d157d7694460 ("tracing/probes: Support BTF field access from $retval")
Link: https://github.com/llvm/llvm-project/commit/2464313eef01c5b1edf0eccf57a32cd… [1]
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
---
kernel/trace/trace_probe.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 424751cdf31f..40830a3ecd96 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -657,7 +657,7 @@ static int parse_btf_arg(char *varname,
ret = query_btf_context(ctx);
if (ret < 0 || ctx->nr_params == 0) {
trace_probe_log_err(ctx->offset, NO_BTF_ENTRY);
- return PTR_ERR(params);
+ return -ENOENT;
}
}
params = ctx->params;
---
base-commit: 6921d1e07cb5eddec830801087b419194fde0803
change-id: 20250715-trace_probe-fix-const-uninit-warning-7dc3accce903
Best regards,
--
Nathan Chancellor <nathan@kernel.org>