From: Jens Axboe <axboe@kernel.dk>
commit c93cc9e16d88e0f5ea95d2d65d58a8a4dab258bc upstream.
If we're freeing/finishing iopoll requests, ensure we check whether the task is idling with respect to cancelation. Otherwise we could end up waiting forever in __io_uring_task_cancel() if the task has active iopoll requests that need cancelation.
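For context, the hang happens on the waiter side. The following is a condensed sketch, not the verbatim upstream function, of how __io_uring_task_cancel() drains inflight requests; the field names (in_idle, inflight, wait) are taken from the hunks below, while the loop body is paraphrased:

void __io_uring_task_cancel(void)
{
	struct io_uring_task *tctx = current->io_uring;
	DEFINE_WAIT(wait);
	s64 inflight;

	/* Mark the task as idling so request-free paths know to wake us. */
	atomic_inc(&tctx->in_idle);
	do {
		inflight = percpu_counter_sum(&tctx->inflight);
		if (!inflight)
			break;
		/*
		 * Sleep until someone dropping inflight refs wakes tctx->wait;
		 * without the wakeups added below, iopoll-only workloads never
		 * issue that wakeup and the task sleeps forever.
		 */
		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
		if (percpu_counter_sum(&tctx->inflight) == inflight)
			schedule();
		finish_wait(&tctx->wait, &wait);
	} while (1);
	atomic_dec(&tctx->in_idle);
}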
Cc: stable@vger.kernel.org # 5.9+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 fs/io_uring.c | 4 ++++
 1 file changed, 4 insertions(+)
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2167,6 +2167,8 @@ static void io_req_free_batch_finish(str
 		struct io_uring_task *tctx = rb->task->io_uring;
 
 		percpu_counter_sub(&tctx->inflight, rb->task_refs);
+		if (atomic_read(&tctx->in_idle))
+			wake_up(&tctx->wait);
 		put_task_struct_many(rb->task, rb->task_refs);
 		rb->task = NULL;
 	}
@@ -2186,6 +2188,8 @@ static void io_req_free_batch(struct req
 			struct io_uring_task *tctx = rb->task->io_uring;
 
 			percpu_counter_sub(&tctx->inflight, rb->task_refs);
+			if (atomic_read(&tctx->in_idle))
+				wake_up(&tctx->wait);
 			put_task_struct_many(rb->task, rb->task_refs);
 		}
 		rb->task = req->task;
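
Both hunks add the same check-and-wake pair directly after the percpu_counter_sub(). Purely as an illustration of the pattern (the helper name below is hypothetical, not something this patch introduces), the duplicated lines amount to:

/*
 * Hypothetical helper, not part of this patch: drop a batch of the
 * task's inflight references and wake __io_uring_task_cancel() if the
 * task is already idling, so it re-checks the inflight counter.
 */
static void io_drop_task_refs(struct io_uring_task *tctx, long refs)
{
	percpu_counter_sub(&tctx->inflight, refs);
	if (atomic_read(&tctx->in_idle))
		wake_up(&tctx->wait);
}

Keeping the wakeup immediately next to the counter decrement is what ensures the waiter cannot miss the transition to zero inflight requests.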