4.14-stable review patch. If anyone has any objections, please let me know.
------------------
From: Sagi Grimberg <sagi@grimberg.me>
[ Upstream commit 8407879c4e0d7731f6e7e905893cecf61a7762c7 ]
Currently we always repost the recv buffer before we send a response capsule back to the host. Since ordering is not guaranteed for send and recv completions, it is possible that we will receive a new request from the host before we get a send completion for the response capsule.
Today we pre-allocate twice as many rsps as the queue depth, but in reality, under heavy load, nothing prevents this gap from growing until we exhaust all of our rsps.
To fix this, if no pre-allocated rsps are left, we dynamically allocate an rsp and make sure to free it when we are done with it. If, under memory pressure, we fail to allocate an rsp, we silently drop the command and wait for the host to retry.
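
In outline the fix works like the sketch below: a user-space analogue of the allocate-or-fall-back pattern, with made-up names (get_rsp/put_rsp, a singly linked free list, malloc/free) standing in for the real nvmet_rdma code, which takes rsps_lock and uses kmalloc/kfree as in the diff that follows.

#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>

struct rsp {
	bool allocated;		/* came from malloc() rather than the pre-allocated pool */
	struct rsp *next;	/* free-list link (stand-in for the kernel list_head) */
};

static struct rsp *free_rsps;	/* head of the pre-allocated free list */

/* Take an rsp from the pool; fall back to a dynamic allocation when empty. */
static struct rsp *get_rsp(void)
{
	struct rsp *rsp = free_rsps;

	if (rsp) {
		free_rsps = rsp->next;
		rsp->allocated = false;
	} else {
		rsp = malloc(sizeof(*rsp));
		if (!rsp)
			return NULL;	/* caller drops the command; the host retries */
		rsp->allocated = true;
	}
	return rsp;
}

/* Return an rsp: free it if it was dynamically allocated, otherwise repool it. */
static void put_rsp(struct rsp *rsp)
{
	if (rsp->allocated) {
		free(rsp);
		return;
	}
	rsp->next = free_rsps;
	free_rsps = rsp;
}

int main(void)
{
	struct rsp *rsp = get_rsp();	/* pool is empty here, so this comes from malloc() */

	if (rsp) {
		printf("allocated=%d\n", rsp->allocated);
		put_rsp(rsp);		/* freed, not returned to the pool */
	}
	return 0;
}
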
Reported-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
[hch: dropped a superfluous assignment]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/nvme/target/rdma.c | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -65,6 +65,7 @@ struct nvmet_rdma_rsp {
 
 	struct nvmet_req	req;
 
+	bool			allocated;
 	u8			n_rdma;
 	u32			flags;
 	u32			invalidate_rkey;
@@ -167,11 +168,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_que
 	unsigned long flags;
 
 	spin_lock_irqsave(&queue->rsps_lock, flags);
-	rsp = list_first_entry(&queue->free_rsps,
+	rsp = list_first_entry_or_null(&queue->free_rsps,
 				struct nvmet_rdma_rsp, free_list);
-	list_del(&rsp->free_list);
+	if (likely(rsp))
+		list_del(&rsp->free_list);
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
+	if (unlikely(!rsp)) {
+		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		if (unlikely(!rsp))
+			return NULL;
+		rsp->allocated = true;
+	}
+
 	return rsp;
 }
 
@@ -180,6 +189,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp
 {
 	unsigned long flags;
 
+	if (rsp->allocated) {
+		kfree(rsp);
+		return;
+	}
+
 	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
 	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
 	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -756,6 +770,15 @@ static void nvmet_rdma_recv_done(struct
 
 	cmd->queue = queue;
 	rsp = nvmet_rdma_get_rsp(queue);
+	if (unlikely(!rsp)) {
+		/*
+		 * we get here only under memory pressure,
+		 * silently drop and have the host retry
+		 * as we can't even fail it.
+		 */
+		nvmet_rdma_post_recv(queue->dev, cmd);
+		return;
+	}
 	rsp->queue = queue;
 	rsp->cmd = cmd;
 	rsp->flags = 0;