From: Jan Kara <jack@suse.cz>
[ Upstream commit 71b0576bdb862e964a82c73327cdd1a249c53e67 ]
Currently canceling of delayed work that flushes old data using
cancel_old_flush() does not prevent the work from being requeued. Thus
in theory new work can be queued after cancel_old_flush() from
reiserfs_freeze() has run. This will become a larger problem once
flush_old_commits() can requeue the work itself.
Fix the problem by recording in sbi->work_queued that the flushing work
is canceled and should not be requeued.
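To make the new protocol concrete, here is a minimal sketch (an
editor's illustration, not part of the patch; the function name is made
up, while the field, lock, and work names mirror the reiserfs code).
sbi->work_queued becomes a tri-state guarded by sbi->old_work_lock, and
the cancel path publishes the "canceled" state before waiting for any
running work:

    /*
     * Sketch only: sbi->work_queued as a tri-state under
     * sbi->old_work_lock:
     *   0 - flush work neither queued nor canceled
     *   1 - flush work queued (and may requeue itself)
     *   2 - flushing canceled, work must not be (re)queued
     */
    static void cancel_flush_sketch(struct reiserfs_sb_info *sbi)
    {
            spin_lock(&sbi->old_work_lock);
            /* Publish the cancel state first... */
            sbi->work_queued = 2;
            spin_unlock(&sbi->old_work_lock);
            /*
             * ...then wait for a possibly running work item. Anything
             * checking work_queued afterwards sees 2 and refuses to
             * queue, so no work can sneak in behind the cancel.
             */
            cancel_delayed_work_sync(&sbi->old_work);
    }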
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
---
 fs/reiserfs/journal.c  |  2 +-
 fs/reiserfs/reiserfs.h |  1 +
 fs/reiserfs/super.c    | 21 +++++++++++++++------
 3 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index d571e173a990..e503effd284d 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1961,7 +1961,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
 	 * Cancel flushing of old commits. Note that neither of these works
 	 * will be requeued because superblock is being shutdown and doesn't
 	 * have MS_ACTIVE set.
 	 */
-	cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
+	reiserfs_cancel_old_flush(sb);
 	/* wait for all commits to finish */
 	cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 1894d96ccb7c..8fb8107710f7 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -2946,6 +2946,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *s,
 				   struct reiserfs_list_bitmap *, unsigned int);
 
 void reiserfs_schedule_old_flush(struct super_block *s);
+void reiserfs_cancel_old_flush(struct super_block *s);
 void add_save_link(struct reiserfs_transaction_handle *th, struct inode *inode,
 		   int truncate);
 int remove_save_link(struct inode *inode, int truncate);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index b27ef3541490..5b8acce4f863 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -89,7 +89,9 @@ static void flush_old_commits(struct work_struct *work)
 	s = sbi->s_journal->j_work_sb;
 	spin_lock(&sbi->old_work_lock);
-	sbi->work_queued = 0;
+	/* Avoid clobbering the cancel state... */
+	if (sbi->work_queued == 1)
+		sbi->work_queued = 0;
 	spin_unlock(&sbi->old_work_lock);
 
 	reiserfs_sync_fs(s, 1);
@@ -116,21 +118,22 @@ void reiserfs_schedule_old_flush(struct super_block *s)
 	spin_unlock(&sbi->old_work_lock);
 }
 
-static void cancel_old_flush(struct super_block *s)
+void reiserfs_cancel_old_flush(struct super_block *s)
 {
 	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
 
-	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
 	spin_lock(&sbi->old_work_lock);
-	sbi->work_queued = 0;
+	/* Make sure no new flushes will be queued */
+	sbi->work_queued = 2;
 	spin_unlock(&sbi->old_work_lock);
+	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
 }
 
 static int reiserfs_freeze(struct super_block *s)
 {
 	struct reiserfs_transaction_handle th;
 
-	cancel_old_flush(s);
+	reiserfs_cancel_old_flush(s);
 
 	reiserfs_write_lock(s);
 	if (!(s->s_flags & MS_RDONLY)) {
@@ -151,7 +154,13 @@ static int reiserfs_freeze(struct super_block *s)
 
 static int reiserfs_unfreeze(struct super_block *s)
 {
+	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+
 	reiserfs_allow_writes(s);
+	spin_lock(&sbi->old_work_lock);
+	/* Allow old_work to run again */
+	sbi->work_queued = 0;
+	spin_unlock(&sbi->old_work_lock);
 	return 0;
 }
 
@@ -2164,7 +2173,7 @@ error_unlocked:
 	if (sbi->commit_wq)
 		destroy_workqueue(sbi->commit_wq);
 
-	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+	reiserfs_cancel_old_flush(s);
 
 	reiserfs_free_bitmap_cache(s);
 	if (SB_BUFFER_WITH_SB(s))
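A note on why state 2 is sufficient: the queueing side is not visible
in this diff, but judging from the work_queued checks above it only
queues work when the flag is 0, so the canceled state blocks new
queueing exactly like the already-queued state. A sketch under that
assumption (the workqueue handle and delay below are placeholders, not
taken from this patch):

    /*
     * Assumed shape of the queueing side
     * (reiserfs_schedule_old_flush(); sketch only).
     */
    static void schedule_flush_sketch(struct reiserfs_sb_info *sbi)
    {
            spin_lock(&sbi->old_work_lock);
            if (sbi->work_queued == 0) {
                    /* 1 (queued) and 2 (canceled) both fail this test */
                    queue_delayed_work(system_long_wq, &sbi->old_work, HZ);
                    sbi->work_queued = 1;
            }
            spin_unlock(&sbi->old_work_lock);
    }

Together with the flush_old_commits() hunk that only resets the flag
when it is still 1, the worker can never overwrite the cancel state,
and reiserfs_unfreeze() is the only place that re-arms queueing by
setting the flag back to 0.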