5.15-stable review patch. If anyone has any objections, please let me know.
------------------
From: Robert Hancock <robert.hancock@calian.com>
[ Upstream commit f0cf4000f5867ec4325d19d32bd83cf583065667 ]
The axienet_start_xmit function was updating the tx_bd_tail variable
multiple times, with potential rollbacks on error or invalid
intermediate positions, even though this variable is also used in the
TX completion path. Use READ_ONCE where this variable is read and
WRITE_ONCE where it is written to make this update more atomic, and
move the write before the MMIO write to start the transfer, so it is
protected by that implicit write barrier.
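
As background, the pattern the patch relies on can be illustrated with a
minimal, self-contained userspace sketch. The READ_ONCE/WRITE_ONCE macros
below are simplified stand-ins for the kernel ones, and struct ring, xmit(),
bds_in_flight() and NUM_BD are hypothetical names for illustration only, not
the driver's code:

#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE/WRITE_ONCE: a single
 * volatile access, so the compiler cannot tear, cache or re-read the value.
 */
#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

#define NUM_BD 64			/* hypothetical ring size */

struct ring {
	unsigned int tx_bd_tail;	/* written only by the xmit path */
	unsigned int tx_bd_ci;		/* written only by the completion path */
};

/* xmit side: advance a local copy of the tail while filling descriptors,
 * then publish it with one WRITE_ONCE before ringing the (not modelled)
 * doorbell, so no partial or rolled-back index is ever visible.
 */
static void xmit(struct ring *r, unsigned int num_frag)
{
	unsigned int new_tail = r->tx_bd_tail;	/* xmit is the only writer */
	unsigned int i;

	for (i = 0; i < num_frag; i++) {
		if (++new_tail >= NUM_BD)
			new_tail = 0;
		/* fill descriptor new_tail here; on error simply return,
		 * r->tx_bd_tail was never touched, so nothing to roll back
		 */
	}
	if (++new_tail >= NUM_BD)
		new_tail = 0;
	WRITE_ONCE(r->tx_bd_tail, new_tail);	/* single visible update */
	/* MMIO doorbell write would follow here */
}

/* completion side: read the tail exactly once per check */
static unsigned int bds_in_flight(struct ring *r)
{
	unsigned int tail = READ_ONCE(r->tx_bd_tail);

	return (tail + NUM_BD - r->tx_bd_ci) % NUM_BD;
}

int main(void)
{
	struct ring r = { 0, 0 };

	xmit(&r, 2);				/* head BD + 2 fragment BDs */
	printf("BDs in flight: %u\n", bds_in_flight(&r));
	return 0;
}

The point mirrors the change below: the transmit side works on a local copy
of the tail and publishes it with a single WRITE_ONCE only after every
descriptor is filled in, while the completion side reads it with READ_ONCE,
so it never observes a half-advanced or rolled-back index.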

Signed-off-by: Robert Hancock <robert.hancock@calian.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: 5a6caa2cfabb ("net: xilinx: axienet: Fix packet counting")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 .../net/ethernet/xilinx/xilinx_axienet_main.c | 26 +++++++++++--------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index fd5f7ac7f4a6b..bcecf4b7308c1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -747,7 +747,8 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 
 	/* Ensure we see all descriptor updates from device or TX IRQ path */
 	rmb();
-	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
+			     lp->tx_bd_num];
 	if (cur_p->cntrl)
 		return NETDEV_TX_BUSY;
 	return 0;
@@ -808,12 +809,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	u32 csum_index_off;
 	skb_frag_t *frag;
 	dma_addr_t tail_p, phys;
+	u32 orig_tail_ptr, new_tail_ptr;
 	struct axienet_local *lp = netdev_priv(ndev);
 	struct axidma_bd *cur_p;
-	u32 orig_tail_ptr = lp->tx_bd_tail;
+
+	orig_tail_ptr = lp->tx_bd_tail;
+	new_tail_ptr = orig_tail_ptr;
 
 	num_frag = skb_shinfo(skb)->nr_frags;
-	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+	cur_p = &lp->tx_bd_v[orig_tail_ptr];
 
 	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
 		/* Should not happen as last start_xmit call should have
@@ -853,9 +857,9 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 
 	for (ii = 0; ii < num_frag; ii++) {
-		if (++lp->tx_bd_tail >= lp->tx_bd_num)
-			lp->tx_bd_tail = 0;
-		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+		if (++new_tail_ptr >= lp->tx_bd_num)
+			new_tail_ptr = 0;
+		cur_p = &lp->tx_bd_v[new_tail_ptr];
 		frag = &skb_shinfo(skb)->frags[ii];
 		phys = dma_map_single(lp->dev,
 				      skb_frag_address(frag),
@@ -867,8 +871,6 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			ndev->stats.tx_dropped++;
 			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
 					      NULL);
-			lp->tx_bd_tail = orig_tail_ptr;
-
 			return NETDEV_TX_OK;
 		}
 		desc_set_phys_addr(lp, phys, cur_p);
@@ -878,11 +880,13 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
 	cur_p->skb = skb;
 
-	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
+	if (++new_tail_ptr >= lp->tx_bd_num)
+		new_tail_ptr = 0;
+	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+
 	/* Start the transfer */
 	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
-	if (++lp->tx_bd_tail >= lp->tx_bd_num)
-		lp->tx_bd_tail = 0;
 
 	/* Stop queue if next transmit may not have space */
 	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {