The functions kvaser_pciefd_start_xmit() and kvaser_pciefd_handle_ack_packet() raced to stop/wake TX queues and get/put echo skbs, as kvaser_pciefd_can->echo_lock was only ever taken when transmitting. E.g., this caused the following error:
can_put_echo_skb: BUG! echo_skb 5 is occupied!
Instead, use the synchronization helpers in netdev_queues.h. As those piggyback on BQL barriers, start updating the in-flight packet and byte counts as well.
Cc: stable@vger.kernel.org
Signed-off-by: Axel Forsman <axfo@kvaser.com>
Tested-by: Jimmy Assarsson <extja@kvaser.com>
Reviewed-by: Jimmy Assarsson <extja@kvaser.com>
---
 drivers/net/can/kvaser_pciefd.c | 70 ++++++++++++++++++++++-----------
 1 file changed, 48 insertions(+), 22 deletions(-)
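For reviewers unfamiliar with the netdev_queues.h helpers, the stop/wake
pairing they provide looks roughly like the sketch below. This is an
illustration only: the foo_* names and the descriptor accounting are
hypothetical, not from this patch; only netif_subqueue_maybe_stop(),
netif_subqueue_completed_wake() and netdev_sent_queue() are real API.

#include <linux/netdevice.h>
#include <net/netdev_queues.h>

struct foo_priv {
	unsigned int tx_free;	/* free TX descriptors (assumed model) */
};

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Stop subqueue 0 when fewer than 1 descriptor is free; the macro
	 * re-checks availability after stopping, so a completion racing
	 * with us cannot leave the queue stopped forever.
	 */
	if (!netif_subqueue_maybe_stop(dev, 0, priv->tx_free, 1, 1))
		return NETDEV_TX_BUSY;

	priv->tx_free--;
	netdev_sent_queue(dev, skb->len);	/* BQL: bytes now in flight */
	/* ... hand the skb to hardware here ... */
	return NETDEV_TX_OK;
}

static void foo_tx_done(struct net_device *dev, unsigned int pkts,
			unsigned int bytes)
{
	struct foo_priv *priv = netdev_priv(dev);

	priv->tx_free += pkts;
	/* Report completed work to BQL and wake subqueue 0 once a
	 * descriptor is free again; the barriers in here pair with
	 * those in netif_subqueue_maybe_stop() above.
	 */
	netif_subqueue_completed_wake(dev, 0, pkts, bytes,
				      priv->tx_free, 1);
}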
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 0d1b895509c3..6251a1ddfa7e 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -16,6 +16,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/timer.h>
+#include <net/netdev_queues.h>
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
@@ -412,8 +413,9 @@ struct kvaser_pciefd_can {
 	u8 cmd_seq;
 	int err_rep_cnt;
 	int echo_idx;
+	unsigned int completed_tx_pkts;
+	unsigned int completed_tx_bytes;
 	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
-	spinlock_t echo_lock; /* Locks the message echo buffer */
 	struct timer_list bec_poll_timer;
 	struct completion start_comp, flush_comp;
 };
@@ -745,11 +747,24 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
 		del_timer(&can->bec_poll_timer);
 	}
 	can->can.state = CAN_STATE_STOPPED;
+	netdev_reset_queue(netdev);
 	close_candev(netdev);
 
 	return ret;
 }
 
+static unsigned int kvaser_pciefd_tx_avail(struct kvaser_pciefd_can *can)
+{
+	u8 count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
+			     ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+
+	if (count < can->can.echo_skb_max) /* Free TX FIFO slot? */
+		/* Avoid reusing unacked seqno */
+		return !can->can.echo_skb[can->echo_idx];
+	else
+		return 0;
+}
+
 static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
 					   struct kvaser_pciefd_can *can,
 					   struct sk_buff *skb)
@@ -797,23 +812,31 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
 					    struct net_device *netdev)
 {
 	struct kvaser_pciefd_can *can = netdev_priv(netdev);
-	unsigned long irq_flags;
 	struct kvaser_pciefd_tx_packet packet;
+	unsigned int frame_len = 0;
 	int nr_words;
-	u8 count;
 
 	if (can_dev_dropped_skb(netdev, skb))
 		return NETDEV_TX_OK;
 
+	/*
+	 * Without room for a new message, stop the queue until at least
+	 * one successful transmit.
+	 */
+	if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1))
+		return NETDEV_TX_BUSY;
+
 	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
 
-	spin_lock_irqsave(&can->echo_lock, irq_flags);
 	/* Prepare and save echo skb in internal slot */
-	can_put_echo_skb(skb, netdev, can->echo_idx, 0);
+	frame_len = can_skb_get_frame_len(skb);
+	can_put_echo_skb(skb, netdev, can->echo_idx, frame_len);
 
 	/* Move echo index to the next slot */
 	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
 
+	netdev_sent_queue(netdev, frame_len);
+
 	/* Write header to fifo */
 	iowrite32(packet.header[0],
 		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
@@ -836,15 +859,6 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
 			  KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
 	}
 
-	count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
-			  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
-	/* No room for a new message, stop the queue until at least one
-	 * successful transmit
-	 */
-	if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
-		netif_stop_queue(netdev);
-	spin_unlock_irqrestore(&can->echo_lock, irq_flags);
-
 	return NETDEV_TX_OK;
 }
 
@@ -970,6 +984,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
 		can->kv_pcie = pcie;
 		can->cmd_seq = 0;
 		can->err_rep_cnt = 0;
+		can->completed_tx_pkts = 0;
+		can->completed_tx_bytes = 0;
 		can->bec.txerr = 0;
 		can->bec.rxerr = 0;
 
@@ -987,7 +1003,6 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
 		can->can.clock.freq = pcie->freq;
 		can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
 		can->echo_idx = 0;
-		spin_lock_init(&can->echo_lock);
 		spin_lock_init(&can->lock);
 
 		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
@@ -1510,19 +1525,16 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
 		netdev_dbg(can->can.dev, "Packet was flushed\n");
 	} else {
 		int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
-		int len;
-		u8 count;
+		unsigned int len, frame_len = 0;
 		struct sk_buff *skb;
 
 		skb = can->can.echo_skb[echo_idx];
 		if (skb)
 			kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
-		len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
-		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
-				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+		len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);
 
-		if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
-			netif_wake_queue(can->can.dev);
+		can->completed_tx_pkts++;
+		can->completed_tx_bytes += frame_len;
 
 		if (!one_shot_fail) {
 			can->can.dev->stats.tx_bytes += len;
@@ -1638,11 +1650,25 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
 {
 	int pos = 0;
 	int res = 0;
+	unsigned int i;
 
 	do {
 		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
 	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
 
+	for (i = 0; i < pcie->nr_channels; ++i) {
+		struct kvaser_pciefd_can *can = pcie->can[i];
+
+		if (!can->completed_tx_pkts)
+			continue;
+		netif_subqueue_completed_wake(can->can.dev, 0,
+					      can->completed_tx_pkts,
+					      can->completed_tx_bytes,
+					      kvaser_pciefd_tx_avail(can), 1);
+		can->completed_tx_pkts = 0;
+		can->completed_tx_bytes = 0;
+	}
+
 	return res;
 }