From: Lang Cheng <chenglang@huawei.com>
[ Upstream commit e13026578b727becf2614f34a4f35e7f0ed21be1 ]
When a non-inline WR reuses a WQE that was previously used for an inline WR, the leftover inline flag should be cleared.
Fixes: 62490fd5a865 ("RDMA/hns: Avoid unnecessary memset on WQEs in post_send")
Link: https://lore.kernel.org/r/1624011020-16992-2-git-send-email-liweihang@huawei...
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 3344b80ecf04..851acc9d050f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -268,8 +268,6 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
 
 	dseg += sizeof(struct hns_roce_v2_rc_send_wqe);
 
-	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
-
 	if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
 		roce_set_bit(rc_sq_wqe->byte_20,
 			     V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);
@@ -314,6 +312,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
 		       (*sge_ind) & (qp->sge.sge_cnt - 1));
 
+	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
+		     !!(wr->send_flags & IB_SEND_INLINE));
 	if (wr->send_flags & IB_SEND_INLINE)
 		return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
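For context, below is a minimal stand-alone sketch (not part of the patch; all names such as fake_wqe and post_send_* are illustrative only) of why a stale inline flag can survive when a WQE slot is reused without a memset, and how unconditionally deriving the flag from the current WR's send_flags, as the patch does, avoids it.

	/*
	 * Illustrative sketch only: models a WQE slot that is reused for a
	 * later WR without being zeroed, as is the case after commit
	 * 62490fd5a865 removed the per-post memset.
	 */
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	#define SEND_INLINE 0x1	/* stands in for IB_SEND_INLINE */

	struct fake_wqe {
		bool inline_flag;	/* stands in for the INLINE bit in byte_4 */
	};

	/* Buggy pattern: sets the flag only for inline WRs, never clears it. */
	static void post_send_buggy(struct fake_wqe *wqe, unsigned int send_flags)
	{
		if (send_flags & SEND_INLINE)
			wqe->inline_flag = true;	/* stale value kept for non-inline WRs */
	}

	/* Fixed pattern: the flag always reflects the current WR. */
	static void post_send_fixed(struct fake_wqe *wqe, unsigned int send_flags)
	{
		wqe->inline_flag = !!(send_flags & SEND_INLINE);
	}

	int main(void)
	{
		struct fake_wqe slot;

		memset(&slot, 0, sizeof(slot));		/* first use: zeroed slot */

		post_send_buggy(&slot, SEND_INLINE);	/* inline WR sets the flag */
		post_send_buggy(&slot, 0);		/* non-inline WR reuses the slot */
		printf("buggy: inline_flag=%d (stale)\n", slot.inline_flag);

		post_send_fixed(&slot, SEND_INLINE);
		post_send_fixed(&slot, 0);
		printf("fixed: inline_flag=%d\n", slot.inline_flag);

		return 0;
	}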