From ade501a5ea27db18e827054d812ea6cc4679b65e Mon Sep 17 00:00:00 2001
From: Ionut Nechita <ionut.nechita@windriver.com>
Date: Tue, 23 Dec 2025 12:29:14 +0200
Subject: [PATCH] block/blk-mq: fix RT kernel regression with dedicated
 quiesce_sync_lock
On PREEMPT_RT kernels, commit 679b1874eba7 ("block: fix ordering between checking QUEUE_FLAG_QUIESCED request adding") causes a severe performance regression on systems with multiple MSI-X interrupt vectors.
That commit added a spinlock_t queue_lock critical section to blk_mq_run_hw_queue() to synchronize the QUEUE_FLAG_QUIESCED check with blk_mq_unquiesce_queue(). While this works correctly on a standard kernel, it causes catastrophic serialization on an RT kernel, where spinlock_t becomes a sleeping rt_mutex.
Problem in the RT kernel:
- blk_mq_run_hw_queue() is called from IRQ thread context (I/O completion)
- With 8 MSI-X vectors, all 8 IRQ threads contend on the same queue_lock
- queue_lock becomes an rt_mutex (a sleeping lock) in the RT kernel
- IRQ threads serialize and enter D-state waiting for the lock
- Throughput drops from 640 MB/s to 153 MB/s
The slow path all of these threads serialize on is condensed below.
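Condensed from the blk_mq_run_hw_queue() hunk further down, this is the
pre-patch slow path every completion thread hits; on PREEMPT_RT the
spin_lock_irqsave() on a spinlock_t is really an rt_mutex acquisition:

	need_run = blk_mq_hw_queue_need_run(hctx);
	if (!need_run) {
		unsigned long flags;

		/* an rt_mutex on PREEMPT_RT: IRQ threads sleep here */
		spin_lock_irqsave(&hctx->queue->queue_lock, flags);
		need_run = blk_mq_hw_queue_need_run(hctx);
		spin_unlock_irqrestore(&hctx->queue->queue_lock, flags);

		if (!need_run)
			return;
	}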
The original commit message noted that memory barriers were considered but rejected because a "memory barrier is not easy to be maintained": barriers would need to be added at every call site throughout the block layer where work is added before calling blk_mq_run_hw_queue(). A sketch of that rejected pairing follows.
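For illustration only: each producer site would need an smp_mb() between
making its work visible and the lockless QUIESCED check, paired with a
barrier on the unquiesce side. The insertion call below is a
hypothetical stand-in for whatever a given call site actually does:

	/* producer: any site that adds work, then runs the hw queue */
	blk_mq_insert_request(rq, 0);	/* stand-in for the insertion */
	smp_mb();			/* pairs with barrier in unquiesce */
	blk_mq_run_hw_queue(hctx, true);

	/* consumer: blk_mq_unquiesce_queue() would need the pairing barrier */
	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
	smp_mb();			/* pairs with producer-side barrier */
	blk_mq_run_hw_queues(q, true);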
Solution: instead of using the general-purpose queue_lock or attempting
complex memory barrier pairing across many call sites, introduce a
dedicated raw_spinlock_t quiesce_sync_lock specifically for
synchronizing the quiesce state between:
- blk_mq_quiesce_queue_nowait()
- blk_mq_unquiesce_queue()
- blk_mq_run_hw_queue()
Why raw_spinlock is safe here:
- The critical section is provably short (only flag and counter checks)
- No sleeping operations are performed under the lock
- raw_spinlock does not convert to an rt_mutex on the RT kernel
- It provides the same ordering guarantees as the original queue_lock
  approach, as the condensed pattern below shows
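Condensed from the hunks below: writer and reader serialize on the same
raw lock, so either blk_mq_run_hw_queue() observes the cleared flag, or
blk_mq_unquiesce_queue() has not yet released the lock and will rerun
the queues itself via run_queue:

	/* writer side, blk_mq_unquiesce_queue() */
	raw_spin_lock_irqsave(&q->quiesce_sync_lock, flags);
	if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	raw_spin_unlock_irqrestore(&q->quiesce_sync_lock, flags);

	/* reader side, blk_mq_run_hw_queue() slow path */
	raw_spin_lock_irqsave(&hctx->queue->quiesce_sync_lock, flags);
	need_run = blk_mq_hw_queue_need_run(hctx);
	raw_spin_unlock_irqrestore(&hctx->queue->quiesce_sync_lock, flags);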
This approach:
- Maintains the correctness of the original synchronization
- Avoids sleeping in the RT kernel's IRQ thread context
- Limits the lock's scope to quiesce-related synchronization only
- Is simpler than auditing all call sites for memory barrier pairing
Additionally, change __blk_freeze_queue_start() to run the hw queues with async=true, avoiding synchronous queue runs during freeze and improving performance on the RT kernel.
Test results on the RT kernel (megaraid_sas with 8 MSI-X vectors):
- Before: 153 MB/s, 6-8 IRQ threads in D-state
- After:  640 MB/s, 0 IRQ threads blocked
Fixes: 679b1874eba7 ("block: fix ordering between checking QUEUE_FLAG_QUIESCED request adding")
Cc: stable@vger.kernel.org
Signed-off-by: Ionut Nechita <ionut.nechita@windriver.com>
---
 block/blk-core.c       |  1 +
 block/blk-mq.c         | 30 +++++++++++++++++++-----------
 include/linux/blkdev.h |  6 ++++++
 3 files changed, 26 insertions(+), 11 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c7b6c1f76359..33a954422415 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -434,6 +434,7 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
 	mutex_init(&q->limits_lock);
 	mutex_init(&q->rq_qos_mutex);
 	spin_lock_init(&q->queue_lock);
+	raw_spin_lock_init(&q->quiesce_sync_lock);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
 	mutex_init(&q->mq_freeze_lock);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e1bca29dc358..c7ca2f485e8e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -178,7 +178,7 @@ bool __blk_freeze_queue_start(struct request_queue *q,
 		percpu_ref_kill(&q->q_usage_counter);
 		mutex_unlock(&q->mq_freeze_lock);
 		if (queue_is_mq(q))
-			blk_mq_run_hw_queues(q, false);
+			blk_mq_run_hw_queues(q, true);
 	} else {
 		mutex_unlock(&q->mq_freeze_lock);
 	}
@@ -289,10 +289,10 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&q->queue_lock, flags);
+	raw_spin_lock_irqsave(&q->quiesce_sync_lock, flags);
 	if (!q->quiesce_depth++)
 		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
-	spin_unlock_irqrestore(&q->queue_lock, flags);
+	raw_spin_unlock_irqrestore(&q->quiesce_sync_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
 
@@ -344,14 +344,14 @@ void blk_mq_unquiesce_queue(struct request_queue *q)
 	unsigned long flags;
 	bool run_queue = false;
 
-	spin_lock_irqsave(&q->queue_lock, flags);
+	raw_spin_lock_irqsave(&q->quiesce_sync_lock, flags);
 	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
 		;
 	} else if (!--q->quiesce_depth) {
 		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
 		run_queue = true;
 	}
-	spin_unlock_irqrestore(&q->queue_lock, flags);
+	raw_spin_unlock_irqrestore(&q->quiesce_sync_lock, flags);
 
 	/* dispatch requests which are inserted during quiescing */
 	if (run_queue)
@@ -2323,19 +2323,27 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 
 	might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
 
+	/*
+	 * First lockless check to avoid unnecessary overhead.
+	 */
 	need_run = blk_mq_hw_queue_need_run(hctx);
 	if (!need_run) {
 		unsigned long flags;
 
 		/*
-		 * Synchronize with blk_mq_unquiesce_queue(), because we check
-		 * if hw queue is quiesced locklessly above, we need the use
-		 * ->queue_lock to make sure we see the up-to-date status to
-		 * not miss rerunning the hw queue.
+		 * Synchronize with blk_mq_unquiesce_queue(). We check if hw
+		 * queue is quiesced locklessly above, so we need to use
+		 * quiesce_sync_lock to ensure we see the up-to-date status
+		 * and don't miss rerunning the hw queue.
+		 *
+		 * Uses raw_spinlock to avoid sleeping in RT kernel's IRQ
+		 * thread context during I/O completion. Critical section is
+		 * short (only flag and counter checks), making raw_spinlock
+		 * safe.
 		 */
-		spin_lock_irqsave(&hctx->queue->queue_lock, flags);
+		raw_spin_lock_irqsave(&hctx->queue->quiesce_sync_lock, flags);
 		need_run = blk_mq_hw_queue_need_run(hctx);
-		spin_unlock_irqrestore(&hctx->queue->queue_lock, flags);
+		raw_spin_unlock_irqrestore(&hctx->queue->quiesce_sync_lock, flags);
 
 		if (!need_run)
 			return;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cd9c97f6f948..0f651a4fae8d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -480,6 +480,12 @@ struct request_queue {
 	struct request *last_merge;
 
 	spinlock_t		queue_lock;
+	/*
+	 * Synchronizes quiesce state checks between blk_mq_run_hw_queue()
+	 * and blk_mq_unquiesce_queue(). Uses raw_spinlock to avoid sleeping
+	 * in RT kernel's IRQ thread context during I/O completion.
+	 */
+	raw_spinlock_t		quiesce_sync_lock;
 
 	int			quiesce_depth;
 