There is a good chance that, while the channel ring is being processed, a
STOP or RESET command for the channel is received from the MHI host. In
those cases, the entire channel ring processing needs to be protected by
chan->lock to prevent the race where the corresponding channel ring gets
reset underneath the worker.
While at it, also add a sanity check to make sure that the ring has been
started before processing it, because if the STOP/RESET command got
processed while mhi_ep_ch_ring_worker() was waiting for chan->lock, the
ring would have already been reset.
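Below is a simplified sketch of the window being closed (the exact call
chain in the command handler may differ; it only illustrates the ordering):
  mhi_ep_ch_ring_worker()             STOP/RESET command handler
  -----------------------             --------------------------
  ring queued for processing
                                      mutex_lock(&chan->lock);
                                      mhi_ep_ring_reset(mhi_cntrl, ring);
                                          /* ring->started = false */
                                      mutex_unlock(&chan->lock);
  mutex_lock(&chan->lock);
  /* without the ring->started check, the
   * worker would now process a ring that
   * has already been reset */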
Cc: <stable@vger.kernel.org> # 5.19
Fixes: 03c0bb8ec983 ("bus: mhi: ep: Add support for processing channel rings")
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
drivers/bus/mhi/ep/main.c | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 0bce6610ebf1..2362fcc8b32c 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -730,24 +730,37 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
list_del(&itr->node);
ring = itr->ring;
+ chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ mutex_lock(&chan->lock);
+
+ /*
+ * The ring could've stopped while we waited to grab chan->lock, so do
+ * a sanity check before going further.
+ */
+ if (!ring->started) {
+ mutex_unlock(&chan->lock);
+ kfree(itr);
+ continue;
+ }
+
/* Update the write offset for the ring */
ret = mhi_ep_update_wr_offset(ring);
if (ret) {
dev_err(dev, "Error updating write offset for ring\n");
+ mutex_unlock(&chan->lock);
kfree(itr);
continue;
}
/* Sanity check to make sure there are elements in the ring */
if (ring->rd_offset == ring->wr_offset) {
+ mutex_unlock(&chan->lock);
kfree(itr);
continue;
}
el = &ring->ring_cache[ring->rd_offset];
- chan = &mhi_cntrl->mhi_chan[ring->ch_id];
- mutex_lock(&chan->lock);
dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
ret = mhi_ep_process_ch_ring(ring, el);
if (ret) {
--
2.25.1
> On Jan 5, 2023, at 6:43 AM, Greg Kroah-Hartman <gregkh@linuxfoundation.org> wrote:
>
> On Wed, Jan 04, 2023 at 04:56:31PM -0500, Joel Fernandes wrote:
>>
>>
>>>> On Jan 4, 2023, at 12:29 AM, Greg Kroah-Hartman <gregkh@linuxfoundation.org> wrote:
>>>
>>> On Tue, Jan 03, 2023 at 04:16:07PM +0000, Joel Fernandes wrote:
>>>>> On Tue, Jan 03, 2023 at 09:13:30AM +0100, Greg Kroah-Hartman wrote:
>>>>> This is the start of the stable review cycle for the 5.10.162 release.
>>>>> There are 63 patches in this series, all will be posted as a response
>>>>> to this one. If anyone has any issues with these being applied, please
>>>>> let me know.
>>>>>
>>>>> Responses should be made by Thu, 05 Jan 2023 08:12:47 +0000.
>>>>> Anything received after that time might be too late.
>>>>>
>>>>> The whole patch series can be found in one patch at:
>>>>> https://www.kernel.org/pub/linux/kernel/v5.x/stable-review/patch-5.10.162-r…
>>>>> or in the git tree and branch at:
>>>>> git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git linux-5.10.y
>>>>> and the diffstat can be found below.
>>>>>
>>>>> thanks,
>>>>
>>>> Testing fails. Could you please pick these 2 up?
>>>> https://lore.kernel.org/r/20221230153215.1333921-1-joel@joelfernandes.org
>>>> https://lore.kernel.org/all/20221230153215.1333921-2-joel@joelfernandes.org/
>>>
>>> That is not a regression from 5.10.161, right?
>>
>> Yes it is not.
>>
>>> This release is only for
>>> the io_uring stuff to make sure that backport was done correctly.
>>>
>>> The current "to apply" queue for the stable trees is very large right
>>> now due to everyone waiting to get tiny things into -rc1 instead of
>>> before then, so the above two are still not yet queued up, sorry.
>>
>> Sure not a problem, I can resend again later if it is still not queued.
>
> You should have already received the email notices saying they were
> queued :)
I happen to take messages from Skynet with a grain of salt ;-).
But thank you for the automated notification!
- Joel
From: Francesco Dolcini <francesco.dolcini@toradex.com>
Add a fallback mechanism to handle the case in which #size-cells is set
to <0>. According to the DT binding, the NAND controller node should have
#size-cells set to <0>, which is not compatible with the legacy way of
specifying partitions directly as child nodes of the nand-controller node.
This fixes a boot failure on colibri-imx7 and potentially other boards.
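As a rough illustration of why parsing fails without the fallback
(hypothetical values): a legacy partition child such as
'partition@0 { reg = <0x0 0x80000>; };' carries one offset cell and one
size cell, so len / 4 == 2, while a_cells + s_cells evaluates to
1 + 0 == 1 once #size-cells is <0>. parse_fixed_partitions() then takes
the "error parsing reg property" path and no partitions get registered.
Forcing s_cells to 1 restores the expected 1 + 1 layout for such nodes.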
Cc: stable@vger.kernel.org
Fixes: 753395ea1e45 ("ARM: dts: imx7: Fix NAND controller size-cells")
Link: https://lore.kernel.org/all/Y4dgBTGNWpM6SQXI@francesco-nb.int.toradex.com/
Signed-off-by: Francesco Dolcini <francesco.dolcini@toradex.com>
---
drivers/mtd/parsers/ofpart_core.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/drivers/mtd/parsers/ofpart_core.c b/drivers/mtd/parsers/ofpart_core.c
index 192190c42fc8..aa3b7fa61e50 100644
--- a/drivers/mtd/parsers/ofpart_core.c
+++ b/drivers/mtd/parsers/ofpart_core.c
@@ -122,6 +122,17 @@ static int parse_fixed_partitions(struct mtd_info *master,
a_cells = of_n_addr_cells(pp);
s_cells = of_n_size_cells(pp);
+ if (s_cells == 0) {
+ /*
+ * Use #size-cells = <1> for backward compatibility
+ * in case #size-cells is set to <0> and firmware adds
+ * OF partitions without setting it.
+ */
+ pr_warn_once("%s: ofpart partition %pOF (%pOF) #size-cells is <0>, using <1> for backward compatibility.\n",
+ master->name, pp,
+ mtd_node);
+ s_cells = 1;
+ }
if (len / 4 != a_cells + s_cells) {
pr_debug("%s: ofpart partition %pOF (%pOF) error parsing reg property.\n",
master->name, pp,
--
2.25.1
From: Jan Kara <jack@suse.cz>
commit a44e84a9b7764c72896f7241a0ec9ac7e7ef38dd upstream.
When manipulating xattr blocks, we can deadlock, infinitely looping inside
ext4_xattr_block_set(), where we constantly keep finding an xattr block
for reuse in mbcache but are unable to reuse it because its reference
count is too big. This happens because the cache entry for the xattr
block is marked as reusable (e_reusable set) although its reference count
is too big. This inconsistent state is then kept indefinitely, so
ext4_xattr_block_set() keeps retrying forever.
The inconsistent state is caused by a non-atomic update of the e_reusable
bit. e_reusable is part of a bitfield, and its update can race with an
update of the e_referenced bit in the same bitfield, resulting in the
loss of one of the updates. Fix the problem by using atomic bitops
instead.
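A simplified sketch of the lost update (both bits live in the same word,
so each non-atomic update is a read-modify-write of the whole word; the
exact call sites are illustrative):
  CPU1: ext4_xattr_block_set()        CPU2: mb_cache_entry_touch()
  load word (e_reusable=1,
             e_referenced=0)
                                      load word (e_reusable=1,
                                                 e_referenced=0)
  clear e_reusable, store word
                                      set e_referenced, store word
                                      /* overwrites CPU1's store:
                                       * e_reusable is 1 again even
                                       * though the refcount already
                                       * hit EXT4_XATTR_REFCOUNT_MAX */
With set_bit()/clear_bit() each update targets its own bit atomically, so
the two updates can no longer overwrite each other.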
This bug has been around for many years, but it became *much* easier
to hit after commit 65f8b80053a1 ("ext4: fix race when reusing xattr
blocks").
A special backport to 5.15 was necessary due to changes to reference counting.
Cc: stable(a)vger.kernel.org # 5.15
Fixes: 6048c64b2609 ("mbcache: add reusable flag to cache entries")
Fixes: 65f8b80053a1 ("ext4: fix race when reusing xattr blocks")
Reported-and-tested-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
Reported-by: Thilo Fromm <t-lo@linux.microsoft.com>
Link: https://lore.kernel.org/r/c77bf00f-4618-7149-56f1-b8d1664b9d07@linux.micros…
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Andreas Dilger <adilger@dilger.ca>
Link: https://lore.kernel.org/r/20221123193950.16758-1-jack@suse.cz
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
---
fs/ext4/xattr.c | 4 ++--
fs/mbcache.c | 14 ++++++++------
include/linux/mbcache.h | 9 +++++++--
3 files changed, 17 insertions(+), 10 deletions(-)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 533216e80fa2..22700812a4d3 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1281,7 +1281,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
ce = mb_cache_entry_get(ea_block_cache, hash,
bh->b_blocknr);
if (ce) {
- ce->e_reusable = 1;
+ set_bit(MBE_REUSABLE_B, &ce->e_flags);
mb_cache_entry_put(ea_block_cache, ce);
}
}
@@ -2042,7 +2042,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
}
BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
if (ref == EXT4_XATTR_REFCOUNT_MAX)
- ce->e_reusable = 0;
+ clear_bit(MBE_REUSABLE_B, &ce->e_flags);
ea_bdebug(new_bh, "reusing; refcount now=%d",
ref);
ext4_xattr_block_csum_set(inode, new_bh);
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 2010bc80a3f2..ac07b50ea3df 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -94,8 +94,9 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
atomic_set(&entry->e_refcnt, 1);
entry->e_key = key;
entry->e_value = value;
- entry->e_reusable = reusable;
- entry->e_referenced = 0;
+ entry->e_flags = 0;
+ if (reusable)
+ set_bit(MBE_REUSABLE_B, &entry->e_flags);
head = mb_cache_entry_head(cache, key);
hlist_bl_lock(head);
hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
@@ -155,7 +156,8 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
while (node) {
entry = hlist_bl_entry(node, struct mb_cache_entry,
e_hash_list);
- if (entry->e_key == key && entry->e_reusable) {
+ if (entry->e_key == key &&
+ test_bit(MBE_REUSABLE_B, &entry->e_flags)) {
atomic_inc(&entry->e_refcnt);
goto out;
}
@@ -325,7 +327,7 @@ EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
void mb_cache_entry_touch(struct mb_cache *cache,
struct mb_cache_entry *entry)
{
- entry->e_referenced = 1;
+ set_bit(MBE_REFERENCED_B, &entry->e_flags);
}
EXPORT_SYMBOL(mb_cache_entry_touch);
@@ -350,8 +352,8 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
while (nr_to_scan-- && !list_empty(&cache->c_list)) {
entry = list_first_entry(&cache->c_list,
struct mb_cache_entry, e_list);
- if (entry->e_referenced || atomic_read(&entry->e_refcnt) > 2) {
- entry->e_referenced = 0;
+ if (test_bit(MBE_REFERENCED_B, &entry->e_flags) || atomic_read(&entry->e_refcnt) > 2) {
+ clear_bit(MBE_REFERENCED_B, &entry->e_flags);
list_move_tail(&entry->e_list, &cache->c_list);
continue;
}
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 8eca7f25c432..62927f7e2588 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -10,6 +10,12 @@
struct mb_cache;
+/* Cache entry flags */
+enum {
+ MBE_REFERENCED_B = 0,
+ MBE_REUSABLE_B
+};
+
struct mb_cache_entry {
/* List of entries in cache - protected by cache->c_list_lock */
struct list_head e_list;
@@ -18,8 +24,7 @@ struct mb_cache_entry {
atomic_t e_refcnt;
/* Key in hash - stable during lifetime of the entry */
u32 e_key;
- u32 e_referenced:1;
- u32 e_reusable:1;
+ unsigned long e_flags;
/* User provided value - stable during lifetime of the entry */
u64 e_value;
};
--
2.25.1