From: Gao Xiang <hsiangkao@redhat.com>
Currently, set_bit() & test_bit() pairs are used as a fast path for initialized configurations. However, these atomic ops are actually relaxed forms; load-acquire & store-release forms are needed instead to make sure uninitialized fields won't be observed in advance here (yet there are no such corresponding bitops, so use full barriers instead.)
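In other words, the pairing added by this patch behaves like the sketch below (illustrative only: the xattr_isize assignment simply stands in for whatever fields the slow path initializes, and the surrounding code is omitted):

	/* writer side (slow path): make the field stores visible before the flag */
	vi->xattr_isize = xattr_isize;
	smp_mb();		/* pairs with the reader's smp_mb() below */
	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

	/* reader side (fast path): order the flag check before any field loads */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
		smp_mb();	/* pairs with the writer's smp_mb() above */
		/* vi->xattr_isize (and friends) are guaranteed initialized here */
		return 0;
	}

Without the two barriers, test_bit()/set_bit() alone allow a reader on a weakly ordered CPU to see the bit set while still observing pre-initialization values of the other fields.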
Fixes: 62dc45979f3f ("staging: erofs: fix race of initializing xattrs of a inode at the same time")
Fixes: 152a333a5895 ("staging: erofs: add compacted compression indexes support")
Cc: <stable@vger.kernel.org> # 5.3+
Reported-by: Huang Jianan <huangjianan@oppo.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
---
 fs/erofs/xattr.c | 10 +++++++++-
 fs/erofs/zmap.c  | 10 +++++++++-
 2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index 5bde77d70852..47314a26767a 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -48,8 +48,14 @@ static int init_inode_xattrs(struct inode *inode)
 	int ret = 0;
 
 	/* the most case is that xattrs of this inode are initialized. */
-	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
+	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
+		/*
+		 * paired with smp_mb() at the end of the function to ensure
+		 * fields will only be observed after the bit is set.
+		 */
+		smp_mb();
 		return 0;
+	}
 
 	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
 		return -ERESTARTSYS;
@@ -137,6 +143,8 @@ static int init_inode_xattrs(struct inode *inode)
 	}
 	xattr_iter_end(&it, atomic_map);
 
+	/* paired with smp_mb() at the beginning of the function. */
+	smp_mb();
 	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
 
 out_unlock:
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index ae325541884e..14d2de35110c 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -36,8 +36,14 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 	void *kaddr;
 	struct z_erofs_map_header *h;
 
-	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
+	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
+		/*
+		 * paired with smp_mb() at the end of the function to ensure
+		 * fields will only be observed after the bit is set.
+		 */
+		smp_mb();
 		return 0;
+	}
 
 	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
 		return -ERESTARTSYS;
@@ -83,6 +89,8 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 
 	vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
 					((h->h_clusterbits >> 5) & 7);
+	/* paired with smp_mb() at the beginning of the function */
+	smp_mb();
 	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
 unmap_done:
 	kunmap_atomic(kaddr);
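Side note, not part of this change: on the writer side the full smp_mb() before set_bit() could likely be relaxed to smp_mb__before_atomic(), which exists precisely to order prior accesses against a following non-value-returning atomic op, roughly:

	smp_mb__before_atomic();	/* order field stores before the atomic set_bit() */
	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

The reader side would still need the full smp_mb(), since test_bit() is a plain load rather than an atomic RMW op.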