The mhi_ep_read_channel function incorrectly assumes that the End of
Transfer (EOT) bit arrives together with the doorbell in chained
transactions. When EOT has not yet arrived, this causes it to advance
mhi_chan->rd_offset beyond wr_offset during host-to-device transfers,
leading to accesses of unmapped host memory (which trigger IOMMU faults)
and to processing of stale TREs.
Modify the loop condition to check, via mhi_ep_queue_is_empty, that the
transfer ring is not empty, so the function processes only valid TREs up
to the current write pointer; this prevents premature reads and ensures
safe traversal of chained TREs. Remove buf_left from the while-loop
condition, which could otherwise exit before the ring had been read
completely, and remove write_offset, which is always zero because a
fresh cache buffer is allocated on every iteration.
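To make the failure mode concrete, below is a small userspace C model of
the two loop strategies. It is only an illustration, not the driver code:
toy_ring, toy_ring_empty, read_until_eot, read_until_empty and RING_SIZE
are invented names, and the TRE payloads, tre_bytes_left accounting and
the async read are all left out. It shows how a loop gated on "EOT seen"
can walk the read pointer past the host's write pointer when the chained
transfer's EOT has not arrived yet, while a loop gated on ring emptiness
stops at the write pointer.

/*
 * Toy userspace model -- NOT the MHI endpoint driver code.
 * It only illustrates why gating the read loop on "EOT seen" can
 * advance the read pointer past the host's write pointer, while
 * gating it on "ring not empty" cannot.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct toy_ring {
	unsigned int rd_offset;		/* device-side read pointer */
	unsigned int wr_offset;		/* host-side write pointer (doorbell) */
	bool eot[RING_SIZE];		/* per-TRE End of Transfer flag */
};

static bool toy_ring_empty(const struct toy_ring *r)
{
	return r->rd_offset == r->wr_offset;
}

/* Old strategy: consume TREs until one carries EOT. */
static void read_until_eot(struct toy_ring *r)
{
	bool done;

	do {
		done = r->eot[r->rd_offset];
		r->rd_offset = (r->rd_offset + 1) % RING_SIZE;	/* may pass wr_offset */
	} while (!done);
}

/* Fixed strategy: consume only TREs the host has actually queued.
 * Assumes at least one queued TRE on entry, as in the driver's caller. */
static void read_until_empty(struct toy_ring *r)
{
	do {
		r->rd_offset = (r->rd_offset + 1) % RING_SIZE;
	} while (!toy_ring_empty(r));
}

int main(void)
{
	/* Chained transfer: host queued TREs 0 and 1, EOT not written yet.
	 * A stale EOT from an earlier transfer is still sitting at slot 3. */
	struct toy_ring a = {
		.rd_offset = 0,
		.wr_offset = 2,
		.eot = { [3] = true },
	};
	struct toy_ring b = a;

	read_until_eot(&a);	/* walks past wr_offset chasing the stale EOT */
	read_until_empty(&b);	/* stops exactly at wr_offset */

	printf("EOT-gated loop:   rd=%u (wr=%u)\n", a.rd_offset, a.wr_offset);
	printf("empty-gated loop: rd=%u (wr=%u)\n", b.rd_offset, b.wr_offset);
	return 0;
}

With these toy values the EOT-gated loop ends at rd=4, two slots past
wr=2 after consuming the stale TREs, while the empty-gated loop stops at
rd=2.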
Fixes: 5301258899773 ("bus: mhi: ep: Add support for reading from the host")
Cc: stable@vger.kernel.org
Co-developed-by: Akhil Vinod <akhil.vinod@oss.qualcomm.com>
Signed-off-by: Akhil Vinod <akhil.vinod@oss.qualcomm.com>
Signed-off-by: Sumit Kumar <sumit.kumar@oss.qualcomm.com>
---
Changes in v3:
- Update commit message
- Migrated to new mail
- Link to v2: https://lore.kernel.org/r/20250822-chained_transfer-v2-1-7aeb5ac215b6@quicin...
Changes in v2:
- Use mhi_ep_queue_is_empty in while loop (Mani).
- Remove do while loop in mhi_ep_process_ch_ring (Mani).
- Remove buf_left, wr_offset, tr_done.
- Haven't added Reviewed-by as there is change in logic.
- Link to v1: https://lore.kernel.org/r/20250709-chained_transfer-v1-1-2326a4605c9c@quicin...
---
 drivers/bus/mhi/ep/main.c | 37 ++++++++++++-------------------------
 1 file changed, 12 insertions(+), 25 deletions(-)
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index b3eafcf2a2c50d95e3efd3afb27038ecf55552a5..cdea24e9291959ae0a92487c1b9698dc8164d2f1 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -403,17 +403,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 {
 	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
-	size_t tr_len, read_offset, write_offset;
+	size_t tr_len, read_offset;
 	struct mhi_ep_buf_info buf_info = {};
 	u32 len = MHI_EP_DEFAULT_MTU;
 	struct mhi_ring_element *el;
-	bool tr_done = false;
 	void *buf_addr;
-	u32 buf_left;
 	int ret;
 
-	buf_left = len;
-
 	do {
 		/* Don't process the transfer ring if the channel is not in RUNNING state */
 		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
@@ -426,24 +422,23 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 		/* Check if there is data pending to be read from previous read operation */
 		if (mhi_chan->tre_bytes_left) {
 			dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
-			tr_len = min(buf_left, mhi_chan->tre_bytes_left);
+			tr_len = min(len, mhi_chan->tre_bytes_left);
 		} else {
 			mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
 			mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
 			mhi_chan->tre_bytes_left = mhi_chan->tre_size;
 
-			tr_len = min(buf_left, mhi_chan->tre_size);
+			tr_len = min(len, mhi_chan->tre_size);
 		}
 
 		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
-		write_offset = len - buf_left;
 
 		buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
 		if (!buf_addr)
 			return -ENOMEM;
 
 		buf_info.host_addr = mhi_chan->tre_loc + read_offset;
-		buf_info.dev_addr = buf_addr + write_offset;
+		buf_info.dev_addr = buf_addr;
 		buf_info.size = tr_len;
 		buf_info.cb = mhi_ep_read_completion;
 		buf_info.cb_buf = buf_addr;
@@ -459,16 +454,12 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 			goto err_free_buf_addr;
 		}
 
-		buf_left -= tr_len;
 		mhi_chan->tre_bytes_left -= tr_len;
 
-		if (!mhi_chan->tre_bytes_left) {
-			if (MHI_TRE_DATA_GET_IEOT(el))
-				tr_done = true;
-
+		if (!mhi_chan->tre_bytes_left)
 			mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
-		}
-	} while (buf_left && !tr_done);
+	/* Read until the ring becomes empty */
+	} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
 
 	return 0;
 
@@ -502,15 +493,11 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
 		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 	} else {
 		/* UL channel */
-		do {
-			ret = mhi_ep_read_channel(mhi_cntrl, ring);
-			if (ret < 0) {
-				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
-				return ret;
-			}
-
-			/* Read until the ring becomes empty */
-		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+		ret = mhi_ep_read_channel(mhi_cntrl, ring);
+		if (ret < 0) {
+			dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+			return ret;
+		}
 	}
 
 	return 0;
---
base-commit: 4c06e63b92038fadb566b652ec3ec04e228931e8
change-id: 20250910-final_chained-750c213725f1
Best regards,
On Wed, 10 Sep 2025 18:11:09 +0530, Sumit Kumar wrote:
> The mhi_ep_read_channel function incorrectly assumes that the End of
> Transfer (EOT) bit arrives together with the doorbell in chained
> transactions. When EOT has not yet arrived, this causes it to advance
> mhi_chan->rd_offset beyond wr_offset during host-to-device transfers,
> leading to accesses of unmapped host memory (which trigger IOMMU faults)
> and to processing of stale TREs.
> 
> Modify the loop condition to check, via mhi_ep_queue_is_empty, that the
> transfer ring is not empty, so the function processes only valid TREs up
> to the current write pointer; this prevents premature reads and ensures
> safe traversal of chained TREs. Remove buf_left from the while-loop
> condition, which could otherwise exit before the ring had been read
> completely, and remove write_offset, which is always zero because a
> fresh cache buffer is allocated on every iteration.
> 
> [...]
Applied, thanks!
[1/1] bus: mhi: ep: Fix chained transfer handling in read path
      commit: f5225a34bd8f9f64eec37f6ae1461289aaa3eb86
Best regards,