There is a short window where the percpu_refs have already hit zero, but we still try to resurrect() them. Play nicer and wait for ->release() to happen in this case, then proceed as if everything is ok. One small downside is that we can ignore signal_pending() on a rare occasion, but someone else should check for it later if needed.
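To illustrate, the window in the quiesce path looks roughly like this (a simplified sketch of the current __io_uring_register() flow, not the literal code):

	percpu_ref_kill(&ctx->refs);
	ret = wait_for_completion_interruptible(&ctx->ref_comp);
	/* a signal arrives: ret != 0, but the last ref may already have been
	 * dropped, with ->release() (io_ring_ctx_ref_free()) still in flight
	 * and the completion not yet signalled */
	if (ret)
		percpu_ref_resurrect(&ctx->refs);	/* resurrect vs. in-flight ->release() */

With the helper added below, that error path no longer resurrects blindly: if the refs already hit zero, it waits for ->release() through the completion (falling back to synchronize_rcu()) and then carries on with the registration as if the quiesce had finished normally.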
Cc: stable@vger.kernel.org # 5.5+
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f2fdebaf28fe..6ea4633e5ed5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1104,6 +1104,21 @@ static inline void io_set_resource_node(struct io_kiocb *req)
 	}
 }
 
+static bool io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
+{
+	if (!percpu_ref_tryget(ref)) {
+		/* already at zero, wait for ->release() */
+		if (!try_wait_for_completion(compl))
+			synchronize_rcu();
+		return false;
+	}
+
+	percpu_ref_resurrect(ref);
+	reinit_completion(compl);
+	percpu_ref_put(ref);
+	return true;
+}
+
 static bool io_match_task(struct io_kiocb *head,
 			  struct task_struct *task,
 			  struct files_struct *files)
@@ -10094,10 +10109,8 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 
 		mutex_lock(&ctx->uring_lock);
 
-		if (ret) {
-			percpu_ref_resurrect(&ctx->refs);
-			goto out_quiesce;
-		}
+		if (ret && io_refs_resurrect(&ctx->refs, &ctx->ref_comp))
+			return ret;
 	}
 
 	if (ctx->restricted) {
@@ -10189,7 +10202,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 	if (io_register_op_must_quiesce(opcode)) {
 		/* bring the ctx back to life */
 		percpu_ref_reinit(&ctx->refs);
-out_quiesce:
 		reinit_completion(&ctx->ref_comp);
 	}
 	return ret;