6.17-stable review patch. If anyone has any objections, please let me know.
------------------
From: Yu Kuai <yukuai3@huawei.com>
[ Upstream commit e63200404477456ec60c62dd8b3b1092aba2e211 ]
No functional changes are intended; make the code cleaner and prepare to fix the grow case in following patches.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Stable-dep-of: b86433721f46 ("blk-mq: fix potential deadlock while nr_requests grown")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 block/blk-mq.c | 39 +++++++++++++++++++++++++++------------
 1 file changed, 27 insertions(+), 12 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index bcb7495893a09..1bafbdced7bd5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4935,25 +4935,40 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	blk_mq_quiesce_queue(q);
 
 	if (blk_mq_is_shared_tags(set->flags)) {
+		/*
+		 * Shared tags, for sched tags, we allocate max initially hence
+		 * tags can't grow, see blk_mq_alloc_sched_tags().
+		 */
 		if (q->elevator)
 			blk_mq_tag_update_sched_shared_tags(q);
 		else
 			blk_mq_tag_resize_shared_tags(set, nr);
-	} else {
+	} else if (!q->elevator) {
+		/*
+		 * Non-shared hardware tags, nr is already checked from
+		 * queue_requests_store() and tags can't grow.
+		 */
 		queue_for_each_hw_ctx(q, hctx, i) {
 			if (!hctx->tags)
 				continue;
-			/*
-			 * If we're using an MQ scheduler, just update the
-			 * scheduler queue depth. This is similar to what the
-			 * old code would do.
-			 */
-			if (hctx->sched_tags)
-				ret = blk_mq_tag_update_depth(hctx,
-						&hctx->sched_tags, nr);
-			else
-				ret = blk_mq_tag_update_depth(hctx,
-						&hctx->tags, nr);
+			sbitmap_queue_resize(&hctx->tags->bitmap_tags,
+					nr - hctx->tags->nr_reserved_tags);
+		}
+	} else if (nr <= q->elevator->et->nr_requests) {
+		/* Non-shared sched tags, and tags don't grow. */
+		queue_for_each_hw_ctx(q, hctx, i) {
+			if (!hctx->sched_tags)
+				continue;
+			sbitmap_queue_resize(&hctx->sched_tags->bitmap_tags,
+					nr - hctx->sched_tags->nr_reserved_tags);
+		}
+	} else {
+		/* Non-shared sched tags, and tags grow */
+		queue_for_each_hw_ctx(q, hctx, i) {
+			if (!hctx->sched_tags)
+				continue;
+			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
+					nr);
 			if (ret)
 				goto out;
 		}
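
For reviewers skimming the hunk, here is a condensed sketch of the four cases blk_mq_update_nr_requests() is split into by this change; the branch bodies are summarised as comments and this paraphrases the diff above rather than quoting the resulting function verbatim:

	if (blk_mq_is_shared_tags(set->flags)) {
		/* shared tags: sched tags are allocated at max up front, so they can't grow */
	} else if (!q->elevator) {
		/* non-shared hardware tags: nr already validated by queue_requests_store(),
		 * shrink each hctx's tag bitmap via sbitmap_queue_resize() */
	} else if (nr <= q->elevator->et->nr_requests) {
		/* non-shared sched tags, not growing: shrink each hctx's sched tag bitmap */
	} else {
		/* non-shared sched tags, growing: blk_mq_tag_update_depth() per hctx, may fail */
	}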